11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * Generic hugetlb support. 31da177e4SLinus Torvalds * (C) William Irwin, April 2004 41da177e4SLinus Torvalds */ 51da177e4SLinus Torvalds #include <linux/gfp.h> 61da177e4SLinus Torvalds #include <linux/list.h> 71da177e4SLinus Torvalds #include <linux/init.h> 81da177e4SLinus Torvalds #include <linux/module.h> 91da177e4SLinus Torvalds #include <linux/mm.h> 101da177e4SLinus Torvalds #include <linux/sysctl.h> 111da177e4SLinus Torvalds #include <linux/highmem.h> 121da177e4SLinus Torvalds #include <linux/nodemask.h> 1363551ae0SDavid Gibson #include <linux/pagemap.h> 145da7ca86SChristoph Lameter #include <linux/mempolicy.h> 15aea47ff3SChristoph Lameter #include <linux/cpuset.h> 163935baa9SDavid Gibson #include <linux/mutex.h> 175da7ca86SChristoph Lameter 1863551ae0SDavid Gibson #include <asm/page.h> 1963551ae0SDavid Gibson #include <asm/pgtable.h> 2063551ae0SDavid Gibson 2163551ae0SDavid Gibson #include <linux/hugetlb.h> 227835e98bSNick Piggin #include "internal.h" 231da177e4SLinus Torvalds 241da177e4SLinus Torvalds const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL; 25a43a8c39SChen, Kenneth W static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages; 267893d1d5SAdam Litke static unsigned long surplus_huge_pages; 27064d9efeSNishanth Aravamudan static unsigned long nr_overcommit_huge_pages; 281da177e4SLinus Torvalds unsigned long max_huge_pages; 29064d9efeSNishanth Aravamudan unsigned long sysctl_overcommit_huge_pages; 301da177e4SLinus Torvalds static struct list_head hugepage_freelists[MAX_NUMNODES]; 311da177e4SLinus Torvalds static unsigned int nr_huge_pages_node[MAX_NUMNODES]; 321da177e4SLinus Torvalds static unsigned int free_huge_pages_node[MAX_NUMNODES]; 337893d1d5SAdam Litke static unsigned int surplus_huge_pages_node[MAX_NUMNODES]; 34396faf03SMel Gorman static gfp_t htlb_alloc_mask = GFP_HIGHUSER; 35396faf03SMel Gorman unsigned long hugepages_treat_as_movable; 3663b4613cSNishanth Aravamudan static int hugetlb_next_nid; 37396faf03SMel Gorman 383935baa9SDavid Gibson /* 393935baa9SDavid Gibson * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages 403935baa9SDavid Gibson */ 413935baa9SDavid Gibson static DEFINE_SPINLOCK(hugetlb_lock); 420bd0f9fbSEric Paris 43e7c4b0bfSAndy Whitcroft /* 4496822904SAndy Whitcroft * Region tracking -- allows tracking of reservations and instantiated pages 4596822904SAndy Whitcroft * across the pages in a mapping. 4696822904SAndy Whitcroft */ 4796822904SAndy Whitcroft struct file_region { 4896822904SAndy Whitcroft struct list_head link; 4996822904SAndy Whitcroft long from; 5096822904SAndy Whitcroft long to; 5196822904SAndy Whitcroft }; 5296822904SAndy Whitcroft 5396822904SAndy Whitcroft static long region_add(struct list_head *head, long f, long t) 5496822904SAndy Whitcroft { 5596822904SAndy Whitcroft struct file_region *rg, *nrg, *trg; 5696822904SAndy Whitcroft 5796822904SAndy Whitcroft /* Locate the region we are either in or before. */ 5896822904SAndy Whitcroft list_for_each_entry(rg, head, link) 5996822904SAndy Whitcroft if (f <= rg->to) 6096822904SAndy Whitcroft break; 6196822904SAndy Whitcroft 6296822904SAndy Whitcroft /* Round our left edge to the current segment if it encloses us. */ 6396822904SAndy Whitcroft if (f > rg->from) 6496822904SAndy Whitcroft f = rg->from; 6596822904SAndy Whitcroft 6696822904SAndy Whitcroft /* Check for and consume any regions we now overlap with. 
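 * For example, if the list already holds [2,5) and [7,8) and region_add()
 * is called with [0,10), the first overlapping region is reused as the
 * result and the second is deleted and freed, leaving a single region
 * [0,10).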
*/ 6796822904SAndy Whitcroft nrg = rg; 6896822904SAndy Whitcroft list_for_each_entry_safe(rg, trg, rg->link.prev, link) { 6996822904SAndy Whitcroft if (&rg->link == head) 7096822904SAndy Whitcroft break; 7196822904SAndy Whitcroft if (rg->from > t) 7296822904SAndy Whitcroft break; 7396822904SAndy Whitcroft 7496822904SAndy Whitcroft /* If this area reaches higher then extend our area to 7596822904SAndy Whitcroft * include it completely. If this is not the first area 7696822904SAndy Whitcroft * which we intend to reuse, free it. */ 7796822904SAndy Whitcroft if (rg->to > t) 7896822904SAndy Whitcroft t = rg->to; 7996822904SAndy Whitcroft if (rg != nrg) { 8096822904SAndy Whitcroft list_del(&rg->link); 8196822904SAndy Whitcroft kfree(rg); 8296822904SAndy Whitcroft } 8396822904SAndy Whitcroft } 8496822904SAndy Whitcroft nrg->from = f; 8596822904SAndy Whitcroft nrg->to = t; 8696822904SAndy Whitcroft return 0; 8796822904SAndy Whitcroft } 8896822904SAndy Whitcroft 8996822904SAndy Whitcroft static long region_chg(struct list_head *head, long f, long t) 9096822904SAndy Whitcroft { 9196822904SAndy Whitcroft struct file_region *rg, *nrg; 9296822904SAndy Whitcroft long chg = 0; 9396822904SAndy Whitcroft 9496822904SAndy Whitcroft /* Locate the region we are before or in. */ 9596822904SAndy Whitcroft list_for_each_entry(rg, head, link) 9696822904SAndy Whitcroft if (f <= rg->to) 9796822904SAndy Whitcroft break; 9896822904SAndy Whitcroft 9996822904SAndy Whitcroft /* If we are below the current region then a new region is required. 10096822904SAndy Whitcroft * Subtle, allocate a new region at the position but make it zero 10196822904SAndy Whitcroft * size such that we can guarantee to record the reservation. */ 10296822904SAndy Whitcroft if (&rg->link == head || t < rg->from) { 10396822904SAndy Whitcroft nrg = kmalloc(sizeof(*nrg), GFP_KERNEL); 10496822904SAndy Whitcroft if (!nrg) 10596822904SAndy Whitcroft return -ENOMEM; 10696822904SAndy Whitcroft nrg->from = f; 10796822904SAndy Whitcroft nrg->to = f; 10896822904SAndy Whitcroft INIT_LIST_HEAD(&nrg->link); 10996822904SAndy Whitcroft list_add(&nrg->link, rg->link.prev); 11096822904SAndy Whitcroft 11196822904SAndy Whitcroft return t - f; 11296822904SAndy Whitcroft } 11396822904SAndy Whitcroft 11496822904SAndy Whitcroft /* Round our left edge to the current segment if it encloses us. */ 11596822904SAndy Whitcroft if (f > rg->from) 11696822904SAndy Whitcroft f = rg->from; 11796822904SAndy Whitcroft chg = t - f; 11896822904SAndy Whitcroft 11996822904SAndy Whitcroft /* Check for and consume any regions we now overlap with. */ 12096822904SAndy Whitcroft list_for_each_entry(rg, rg->link.prev, link) { 12196822904SAndy Whitcroft if (&rg->link == head) 12296822904SAndy Whitcroft break; 12396822904SAndy Whitcroft if (rg->from > t) 12496822904SAndy Whitcroft return chg; 12596822904SAndy Whitcroft 12696822904SAndy Whitcroft /* We overlap with this area, if it extends futher than 12796822904SAndy Whitcroft * us then we must extend ourselves. Account for its 12896822904SAndy Whitcroft * existing reservation. 
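 * For example, with a single existing region [2,5), region_chg(head, 0, 10)
 * subtracts the 3 pages already reserved and returns 7, the number of
 * pages in [0,10) that still need a new reservation.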
*/ 12996822904SAndy Whitcroft if (rg->to > t) { 13096822904SAndy Whitcroft chg += rg->to - t; 13196822904SAndy Whitcroft t = rg->to; 13296822904SAndy Whitcroft } 13396822904SAndy Whitcroft chg -= rg->to - rg->from; 13496822904SAndy Whitcroft } 13596822904SAndy Whitcroft return chg; 13696822904SAndy Whitcroft } 13796822904SAndy Whitcroft 13896822904SAndy Whitcroft static long region_truncate(struct list_head *head, long end) 13996822904SAndy Whitcroft { 14096822904SAndy Whitcroft struct file_region *rg, *trg; 14196822904SAndy Whitcroft long chg = 0; 14296822904SAndy Whitcroft 14396822904SAndy Whitcroft /* Locate the region we are either in or before. */ 14496822904SAndy Whitcroft list_for_each_entry(rg, head, link) 14596822904SAndy Whitcroft if (end <= rg->to) 14696822904SAndy Whitcroft break; 14796822904SAndy Whitcroft if (&rg->link == head) 14896822904SAndy Whitcroft return 0; 14996822904SAndy Whitcroft 15096822904SAndy Whitcroft /* If we are in the middle of a region then adjust it. */ 15196822904SAndy Whitcroft if (end > rg->from) { 15296822904SAndy Whitcroft chg = rg->to - end; 15396822904SAndy Whitcroft rg->to = end; 15496822904SAndy Whitcroft rg = list_entry(rg->link.next, typeof(*rg), link); 15596822904SAndy Whitcroft } 15696822904SAndy Whitcroft 15796822904SAndy Whitcroft /* Drop any remaining regions. */ 15896822904SAndy Whitcroft list_for_each_entry_safe(rg, trg, rg->link.prev, link) { 15996822904SAndy Whitcroft if (&rg->link == head) 16096822904SAndy Whitcroft break; 16196822904SAndy Whitcroft chg += rg->to - rg->from; 16296822904SAndy Whitcroft list_del(&rg->link); 16396822904SAndy Whitcroft kfree(rg); 16496822904SAndy Whitcroft } 16596822904SAndy Whitcroft return chg; 16696822904SAndy Whitcroft } 16796822904SAndy Whitcroft 16896822904SAndy Whitcroft /* 169e7c4b0bfSAndy Whitcroft * Convert the address within this vma to the page offset within 170e7c4b0bfSAndy Whitcroft * the mapping, in base page units. 171e7c4b0bfSAndy Whitcroft */ 172e7c4b0bfSAndy Whitcroft static pgoff_t vma_page_offset(struct vm_area_struct *vma, 173e7c4b0bfSAndy Whitcroft unsigned long address) 174e7c4b0bfSAndy Whitcroft { 175e7c4b0bfSAndy Whitcroft return ((address - vma->vm_start) >> PAGE_SHIFT) + 176e7c4b0bfSAndy Whitcroft (vma->vm_pgoff >> PAGE_SHIFT); 177e7c4b0bfSAndy Whitcroft } 178e7c4b0bfSAndy Whitcroft 179e7c4b0bfSAndy Whitcroft /* 180e7c4b0bfSAndy Whitcroft * Convert the address within this vma to the page offset within 181e7c4b0bfSAndy Whitcroft * the mapping, in pagecache page units; huge pages here. 182e7c4b0bfSAndy Whitcroft */ 183e7c4b0bfSAndy Whitcroft static pgoff_t vma_pagecache_offset(struct vm_area_struct *vma, 184e7c4b0bfSAndy Whitcroft unsigned long address) 185e7c4b0bfSAndy Whitcroft { 186e7c4b0bfSAndy Whitcroft return ((address - vma->vm_start) >> HPAGE_SHIFT) + 187e7c4b0bfSAndy Whitcroft (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT)); 188e7c4b0bfSAndy Whitcroft } 189e7c4b0bfSAndy Whitcroft 19004f2cbe3SMel Gorman #define HPAGE_RESV_OWNER (1UL << (BITS_PER_LONG - 1)) 19104f2cbe3SMel Gorman #define HPAGE_RESV_UNMAPPED (1UL << (BITS_PER_LONG - 2)) 19204f2cbe3SMel Gorman #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED) 193a1e78772SMel Gorman /* 194a1e78772SMel Gorman * These helpers are used to track how many pages are reserved for 195a1e78772SMel Gorman * faults in a MAP_PRIVATE mapping. Only the process that called mmap() 196a1e78772SMel Gorman * is guaranteed to have their future faults succeed. 
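 * The reserve count and the HPAGE_RESV_* flags above are packed together
 * into vma->vm_private_data: the top two bits hold the flags and the
 * remaining bits hold the number of reserved huge pages.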
197a1e78772SMel Gorman * 198a1e78772SMel Gorman * With the exception of reset_vma_resv_huge_pages() which is called at fork(), 199a1e78772SMel Gorman * the reserve counters are updated with the hugetlb_lock held. It is safe 200a1e78772SMel Gorman * to reset the VMA at fork() time as it is not in use yet and there is no 201a1e78772SMel Gorman * chance of the global counters getting corrupted as a result of the values. 202a1e78772SMel Gorman */ 203e7c4b0bfSAndy Whitcroft static unsigned long get_vma_private_data(struct vm_area_struct *vma) 204e7c4b0bfSAndy Whitcroft { 205e7c4b0bfSAndy Whitcroft return (unsigned long)vma->vm_private_data; 206e7c4b0bfSAndy Whitcroft } 207e7c4b0bfSAndy Whitcroft 208e7c4b0bfSAndy Whitcroft static void set_vma_private_data(struct vm_area_struct *vma, 209e7c4b0bfSAndy Whitcroft unsigned long value) 210e7c4b0bfSAndy Whitcroft { 211e7c4b0bfSAndy Whitcroft vma->vm_private_data = (void *)value; 212e7c4b0bfSAndy Whitcroft } 213e7c4b0bfSAndy Whitcroft 214a1e78772SMel Gorman static unsigned long vma_resv_huge_pages(struct vm_area_struct *vma) 215a1e78772SMel Gorman { 216a1e78772SMel Gorman VM_BUG_ON(!is_vm_hugetlb_page(vma)); 217a1e78772SMel Gorman if (!(vma->vm_flags & VM_SHARED)) 218e7c4b0bfSAndy Whitcroft return get_vma_private_data(vma) & ~HPAGE_RESV_MASK; 219a1e78772SMel Gorman return 0; 220a1e78772SMel Gorman } 221a1e78772SMel Gorman 222a1e78772SMel Gorman static void set_vma_resv_huge_pages(struct vm_area_struct *vma, 223a1e78772SMel Gorman unsigned long reserve) 224a1e78772SMel Gorman { 225a1e78772SMel Gorman VM_BUG_ON(!is_vm_hugetlb_page(vma)); 226a1e78772SMel Gorman VM_BUG_ON(vma->vm_flags & VM_SHARED); 227a1e78772SMel Gorman 228e7c4b0bfSAndy Whitcroft set_vma_private_data(vma, 229e7c4b0bfSAndy Whitcroft (get_vma_private_data(vma) & HPAGE_RESV_MASK) | reserve); 23004f2cbe3SMel Gorman } 23104f2cbe3SMel Gorman 23204f2cbe3SMel Gorman static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags) 23304f2cbe3SMel Gorman { 23404f2cbe3SMel Gorman VM_BUG_ON(!is_vm_hugetlb_page(vma)); 235e7c4b0bfSAndy Whitcroft VM_BUG_ON(vma->vm_flags & VM_SHARED); 236e7c4b0bfSAndy Whitcroft 237e7c4b0bfSAndy Whitcroft set_vma_private_data(vma, get_vma_private_data(vma) | flags); 23804f2cbe3SMel Gorman } 23904f2cbe3SMel Gorman 24004f2cbe3SMel Gorman static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) 24104f2cbe3SMel Gorman { 24204f2cbe3SMel Gorman VM_BUG_ON(!is_vm_hugetlb_page(vma)); 243e7c4b0bfSAndy Whitcroft 244e7c4b0bfSAndy Whitcroft return (get_vma_private_data(vma) & flag) != 0; 245a1e78772SMel Gorman } 246a1e78772SMel Gorman 247a1e78772SMel Gorman /* Decrement the reserved pages in the hugepage pool by one */ 248a1e78772SMel Gorman static void decrement_hugepage_resv_vma(struct vm_area_struct *vma) 249a1e78772SMel Gorman { 250c37f9fb1SAndy Whitcroft if (vma->vm_flags & VM_NORESERVE) 251c37f9fb1SAndy Whitcroft return; 252c37f9fb1SAndy Whitcroft 253a1e78772SMel Gorman if (vma->vm_flags & VM_SHARED) { 254a1e78772SMel Gorman /* Shared mappings always use reserves */ 255a1e78772SMel Gorman resv_huge_pages--; 256a1e78772SMel Gorman } else { 257a1e78772SMel Gorman /* 258a1e78772SMel Gorman * Only the process that called mmap() has reserves for 259a1e78772SMel Gorman * private mappings. 
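 * Besides the global resv_huge_pages counter, the reserve count packed
 * into vm_private_data is decremented below so the owner's remaining
 * private reserve stays accurate.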
260a1e78772SMel Gorman */ 26104f2cbe3SMel Gorman if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { 26204f2cbe3SMel Gorman unsigned long flags, reserve; 263a1e78772SMel Gorman resv_huge_pages--; 26404f2cbe3SMel Gorman flags = (unsigned long)vma->vm_private_data & 26504f2cbe3SMel Gorman HPAGE_RESV_MASK; 266a1e78772SMel Gorman reserve = (unsigned long)vma->vm_private_data - 1; 26704f2cbe3SMel Gorman vma->vm_private_data = (void *)(reserve | flags); 268a1e78772SMel Gorman } 269a1e78772SMel Gorman } 270a1e78772SMel Gorman } 271a1e78772SMel Gorman 27204f2cbe3SMel Gorman /* Reset counters to 0 and clear all HPAGE_RESV_* flags */ 273a1e78772SMel Gorman void reset_vma_resv_huge_pages(struct vm_area_struct *vma) 274a1e78772SMel Gorman { 275a1e78772SMel Gorman VM_BUG_ON(!is_vm_hugetlb_page(vma)); 276a1e78772SMel Gorman if (!(vma->vm_flags & VM_SHARED)) 277a1e78772SMel Gorman vma->vm_private_data = (void *)0; 278a1e78772SMel Gorman } 279a1e78772SMel Gorman 280a1e78772SMel Gorman /* Returns true if the VMA has associated reserve pages */ 281a1e78772SMel Gorman static int vma_has_private_reserves(struct vm_area_struct *vma) 282a1e78772SMel Gorman { 283a1e78772SMel Gorman if (vma->vm_flags & VM_SHARED) 284a1e78772SMel Gorman return 0; 285a1e78772SMel Gorman if (!vma_resv_huge_pages(vma)) 286a1e78772SMel Gorman return 0; 287a1e78772SMel Gorman return 1; 288a1e78772SMel Gorman } 289a1e78772SMel Gorman 29079ac6ba4SDavid Gibson static void clear_huge_page(struct page *page, unsigned long addr) 29179ac6ba4SDavid Gibson { 29279ac6ba4SDavid Gibson int i; 29379ac6ba4SDavid Gibson 29479ac6ba4SDavid Gibson might_sleep(); 29579ac6ba4SDavid Gibson for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) { 29679ac6ba4SDavid Gibson cond_resched(); 297281e0e3bSRalf Baechle clear_user_highpage(page + i, addr + i * PAGE_SIZE); 29879ac6ba4SDavid Gibson } 29979ac6ba4SDavid Gibson } 30079ac6ba4SDavid Gibson 30179ac6ba4SDavid Gibson static void copy_huge_page(struct page *dst, struct page *src, 3029de455b2SAtsushi Nemoto unsigned long addr, struct vm_area_struct *vma) 30379ac6ba4SDavid Gibson { 30479ac6ba4SDavid Gibson int i; 30579ac6ba4SDavid Gibson 30679ac6ba4SDavid Gibson might_sleep(); 30779ac6ba4SDavid Gibson for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) { 30879ac6ba4SDavid Gibson cond_resched(); 3099de455b2SAtsushi Nemoto copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma); 31079ac6ba4SDavid Gibson } 31179ac6ba4SDavid Gibson } 31279ac6ba4SDavid Gibson 3131da177e4SLinus Torvalds static void enqueue_huge_page(struct page *page) 3141da177e4SLinus Torvalds { 3151da177e4SLinus Torvalds int nid = page_to_nid(page); 3161da177e4SLinus Torvalds list_add(&page->lru, &hugepage_freelists[nid]); 3171da177e4SLinus Torvalds free_huge_pages++; 3181da177e4SLinus Torvalds free_huge_pages_node[nid]++; 3191da177e4SLinus Torvalds } 3201da177e4SLinus Torvalds 321348e1e04SNishanth Aravamudan static struct page *dequeue_huge_page(void) 322348e1e04SNishanth Aravamudan { 323348e1e04SNishanth Aravamudan int nid; 324348e1e04SNishanth Aravamudan struct page *page = NULL; 325348e1e04SNishanth Aravamudan 326348e1e04SNishanth Aravamudan for (nid = 0; nid < MAX_NUMNODES; ++nid) { 327348e1e04SNishanth Aravamudan if (!list_empty(&hugepage_freelists[nid])) { 328348e1e04SNishanth Aravamudan page = list_entry(hugepage_freelists[nid].next, 329348e1e04SNishanth Aravamudan struct page, lru); 330348e1e04SNishanth Aravamudan list_del(&page->lru); 331348e1e04SNishanth Aravamudan free_huge_pages--; 332348e1e04SNishanth Aravamudan free_huge_pages_node[nid]--; 
333348e1e04SNishanth Aravamudan break; 334348e1e04SNishanth Aravamudan } 335348e1e04SNishanth Aravamudan } 336348e1e04SNishanth Aravamudan return page; 337348e1e04SNishanth Aravamudan } 338348e1e04SNishanth Aravamudan 339348e1e04SNishanth Aravamudan static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma, 34004f2cbe3SMel Gorman unsigned long address, int avoid_reserve) 3411da177e4SLinus Torvalds { 34231a5c6e4SNishanth Aravamudan int nid; 3431da177e4SLinus Torvalds struct page *page = NULL; 344480eccf9SLee Schermerhorn struct mempolicy *mpol; 34519770b32SMel Gorman nodemask_t *nodemask; 346396faf03SMel Gorman struct zonelist *zonelist = huge_zonelist(vma, address, 34719770b32SMel Gorman htlb_alloc_mask, &mpol, &nodemask); 348dd1a239fSMel Gorman struct zone *zone; 349dd1a239fSMel Gorman struct zoneref *z; 3501da177e4SLinus Torvalds 351a1e78772SMel Gorman /* 352a1e78772SMel Gorman * A child process with MAP_PRIVATE mappings created by their parent 353a1e78772SMel Gorman * have no page reserves. This check ensures that reservations are 354a1e78772SMel Gorman * not "stolen". The child may still get SIGKILLed 355a1e78772SMel Gorman */ 356a1e78772SMel Gorman if (!vma_has_private_reserves(vma) && 357a1e78772SMel Gorman free_huge_pages - resv_huge_pages == 0) 358a1e78772SMel Gorman return NULL; 359a1e78772SMel Gorman 36004f2cbe3SMel Gorman /* If reserves cannot be used, ensure enough pages are in the pool */ 36104f2cbe3SMel Gorman if (avoid_reserve && free_huge_pages - resv_huge_pages == 0) 36204f2cbe3SMel Gorman return NULL; 36304f2cbe3SMel Gorman 36419770b32SMel Gorman for_each_zone_zonelist_nodemask(zone, z, zonelist, 36519770b32SMel Gorman MAX_NR_ZONES - 1, nodemask) { 36654a6eb5cSMel Gorman nid = zone_to_nid(zone); 36754a6eb5cSMel Gorman if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) && 3683abf7afdSAndrew Morton !list_empty(&hugepage_freelists[nid])) { 3691da177e4SLinus Torvalds page = list_entry(hugepage_freelists[nid].next, 3701da177e4SLinus Torvalds struct page, lru); 3711da177e4SLinus Torvalds list_del(&page->lru); 3721da177e4SLinus Torvalds free_huge_pages--; 3731da177e4SLinus Torvalds free_huge_pages_node[nid]--; 37404f2cbe3SMel Gorman 37504f2cbe3SMel Gorman if (!avoid_reserve) 376a1e78772SMel Gorman decrement_hugepage_resv_vma(vma); 377a1e78772SMel Gorman 3785ab3ee7bSKen Chen break; 3791da177e4SLinus Torvalds } 3803abf7afdSAndrew Morton } 38152cd3b07SLee Schermerhorn mpol_cond_put(mpol); 3821da177e4SLinus Torvalds return page; 3831da177e4SLinus Torvalds } 3841da177e4SLinus Torvalds 3856af2acb6SAdam Litke static void update_and_free_page(struct page *page) 3866af2acb6SAdam Litke { 3876af2acb6SAdam Litke int i; 3886af2acb6SAdam Litke nr_huge_pages--; 3896af2acb6SAdam Litke nr_huge_pages_node[page_to_nid(page)]--; 3906af2acb6SAdam Litke for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) { 3916af2acb6SAdam Litke page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced | 3926af2acb6SAdam Litke 1 << PG_dirty | 1 << PG_active | 1 << PG_reserved | 3936af2acb6SAdam Litke 1 << PG_private | 1<< PG_writeback); 3946af2acb6SAdam Litke } 3956af2acb6SAdam Litke set_compound_page_dtor(page, NULL); 3966af2acb6SAdam Litke set_page_refcounted(page); 3977f2e9525SGerald Schaefer arch_release_hugepage(page); 3986af2acb6SAdam Litke __free_pages(page, HUGETLB_PAGE_ORDER); 3996af2acb6SAdam Litke } 4006af2acb6SAdam Litke 40127a85ef1SDavid Gibson static void free_huge_page(struct page *page) 40227a85ef1SDavid Gibson { 4037893d1d5SAdam Litke int nid = page_to_nid(page); 
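	/*
	 * The page's mapping was stashed in page_private() by alloc_huge_page()
	 * so that the quota charged against it can be released below.
	 */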
404c79fb75eSAdam Litke struct address_space *mapping; 40527a85ef1SDavid Gibson 406c79fb75eSAdam Litke mapping = (struct address_space *) page_private(page); 407e5df70abSAndy Whitcroft set_page_private(page, 0); 4087893d1d5SAdam Litke BUG_ON(page_count(page)); 40927a85ef1SDavid Gibson INIT_LIST_HEAD(&page->lru); 41027a85ef1SDavid Gibson 41127a85ef1SDavid Gibson spin_lock(&hugetlb_lock); 4127893d1d5SAdam Litke if (surplus_huge_pages_node[nid]) { 4137893d1d5SAdam Litke update_and_free_page(page); 4147893d1d5SAdam Litke surplus_huge_pages--; 4157893d1d5SAdam Litke surplus_huge_pages_node[nid]--; 4167893d1d5SAdam Litke } else { 41727a85ef1SDavid Gibson enqueue_huge_page(page); 4187893d1d5SAdam Litke } 41927a85ef1SDavid Gibson spin_unlock(&hugetlb_lock); 420c79fb75eSAdam Litke if (mapping) 4219a119c05SAdam Litke hugetlb_put_quota(mapping, 1); 42227a85ef1SDavid Gibson } 42327a85ef1SDavid Gibson 4247893d1d5SAdam Litke /* 4257893d1d5SAdam Litke * Increment or decrement surplus_huge_pages. Keep node-specific counters 4267893d1d5SAdam Litke * balanced by operating on them in a round-robin fashion. 4277893d1d5SAdam Litke * Returns 1 if an adjustment was made. 4287893d1d5SAdam Litke */ 4297893d1d5SAdam Litke static int adjust_pool_surplus(int delta) 4307893d1d5SAdam Litke { 4317893d1d5SAdam Litke static int prev_nid; 4327893d1d5SAdam Litke int nid = prev_nid; 4337893d1d5SAdam Litke int ret = 0; 4347893d1d5SAdam Litke 4357893d1d5SAdam Litke VM_BUG_ON(delta != -1 && delta != 1); 4367893d1d5SAdam Litke do { 4377893d1d5SAdam Litke nid = next_node(nid, node_online_map); 4387893d1d5SAdam Litke if (nid == MAX_NUMNODES) 4397893d1d5SAdam Litke nid = first_node(node_online_map); 4407893d1d5SAdam Litke 4417893d1d5SAdam Litke /* To shrink on this node, there must be a surplus page */ 4427893d1d5SAdam Litke if (delta < 0 && !surplus_huge_pages_node[nid]) 4437893d1d5SAdam Litke continue; 4447893d1d5SAdam Litke /* Surplus cannot exceed the total number of pages */ 4457893d1d5SAdam Litke if (delta > 0 && surplus_huge_pages_node[nid] >= 4467893d1d5SAdam Litke nr_huge_pages_node[nid]) 4477893d1d5SAdam Litke continue; 4487893d1d5SAdam Litke 4497893d1d5SAdam Litke surplus_huge_pages += delta; 4507893d1d5SAdam Litke surplus_huge_pages_node[nid] += delta; 4517893d1d5SAdam Litke ret = 1; 4527893d1d5SAdam Litke break; 4537893d1d5SAdam Litke } while (nid != prev_nid); 4547893d1d5SAdam Litke 4557893d1d5SAdam Litke prev_nid = nid; 4567893d1d5SAdam Litke return ret; 4577893d1d5SAdam Litke } 4587893d1d5SAdam Litke 45963b4613cSNishanth Aravamudan static struct page *alloc_fresh_huge_page_node(int nid) 4601da177e4SLinus Torvalds { 4611da177e4SLinus Torvalds struct page *page; 462f96efd58SJoe Jin 46363b4613cSNishanth Aravamudan page = alloc_pages_node(nid, 464551883aeSNishanth Aravamudan htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE| 465551883aeSNishanth Aravamudan __GFP_REPEAT|__GFP_NOWARN, 466f96efd58SJoe Jin HUGETLB_PAGE_ORDER); 4671da177e4SLinus Torvalds if (page) { 4687f2e9525SGerald Schaefer if (arch_prepare_hugepage(page)) { 4697f2e9525SGerald Schaefer __free_pages(page, HUGETLB_PAGE_ORDER); 4707b8ee84dSHarvey Harrison return NULL; 4717f2e9525SGerald Schaefer } 47233f2ef89SAndy Whitcroft set_compound_page_dtor(page, free_huge_page); 4730bd0f9fbSEric Paris spin_lock(&hugetlb_lock); 4741da177e4SLinus Torvalds nr_huge_pages++; 47563b4613cSNishanth Aravamudan nr_huge_pages_node[nid]++; 4760bd0f9fbSEric Paris spin_unlock(&hugetlb_lock); 477a482289dSNick Piggin put_page(page); /* free it into the hugepage allocator */ 4781da177e4SLinus 
Torvalds } 47963b4613cSNishanth Aravamudan 48063b4613cSNishanth Aravamudan return page; 48163b4613cSNishanth Aravamudan } 48263b4613cSNishanth Aravamudan 48363b4613cSNishanth Aravamudan static int alloc_fresh_huge_page(void) 48463b4613cSNishanth Aravamudan { 48563b4613cSNishanth Aravamudan struct page *page; 48663b4613cSNishanth Aravamudan int start_nid; 48763b4613cSNishanth Aravamudan int next_nid; 48863b4613cSNishanth Aravamudan int ret = 0; 48963b4613cSNishanth Aravamudan 49063b4613cSNishanth Aravamudan start_nid = hugetlb_next_nid; 49163b4613cSNishanth Aravamudan 49263b4613cSNishanth Aravamudan do { 49363b4613cSNishanth Aravamudan page = alloc_fresh_huge_page_node(hugetlb_next_nid); 49463b4613cSNishanth Aravamudan if (page) 49563b4613cSNishanth Aravamudan ret = 1; 49663b4613cSNishanth Aravamudan /* 49763b4613cSNishanth Aravamudan * Use a helper variable to find the next node and then 49863b4613cSNishanth Aravamudan * copy it back to hugetlb_next_nid afterwards: 49963b4613cSNishanth Aravamudan * otherwise there's a window in which a racer might 50063b4613cSNishanth Aravamudan * pass invalid nid MAX_NUMNODES to alloc_pages_node. 50163b4613cSNishanth Aravamudan * But we don't need to use a spin_lock here: it really 50263b4613cSNishanth Aravamudan * doesn't matter if occasionally a racer chooses the 50363b4613cSNishanth Aravamudan * same nid as we do. Move nid forward in the mask even 50463b4613cSNishanth Aravamudan * if we just successfully allocated a hugepage so that 50563b4613cSNishanth Aravamudan * the next caller gets hugepages on the next node. 50663b4613cSNishanth Aravamudan */ 50763b4613cSNishanth Aravamudan next_nid = next_node(hugetlb_next_nid, node_online_map); 50863b4613cSNishanth Aravamudan if (next_nid == MAX_NUMNODES) 50963b4613cSNishanth Aravamudan next_nid = first_node(node_online_map); 51063b4613cSNishanth Aravamudan hugetlb_next_nid = next_nid; 51163b4613cSNishanth Aravamudan } while (!page && hugetlb_next_nid != start_nid); 51263b4613cSNishanth Aravamudan 5133b116300SAdam Litke if (ret) 5143b116300SAdam Litke count_vm_event(HTLB_BUDDY_PGALLOC); 5153b116300SAdam Litke else 5163b116300SAdam Litke count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); 5173b116300SAdam Litke 51863b4613cSNishanth Aravamudan return ret; 5191da177e4SLinus Torvalds } 5201da177e4SLinus Torvalds 5217893d1d5SAdam Litke static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma, 5227893d1d5SAdam Litke unsigned long address) 5237893d1d5SAdam Litke { 5247893d1d5SAdam Litke struct page *page; 525d1c3fb1fSNishanth Aravamudan unsigned int nid; 5267893d1d5SAdam Litke 527d1c3fb1fSNishanth Aravamudan /* 528d1c3fb1fSNishanth Aravamudan * Assume we will successfully allocate the surplus page to 529d1c3fb1fSNishanth Aravamudan * prevent racing processes from causing the surplus to exceed 530d1c3fb1fSNishanth Aravamudan * overcommit 531d1c3fb1fSNishanth Aravamudan * 532d1c3fb1fSNishanth Aravamudan * This however introduces a different race, where a process B 533d1c3fb1fSNishanth Aravamudan * tries to grow the static hugepage pool while alloc_pages() is 534d1c3fb1fSNishanth Aravamudan * called by process A. B will only examine the per-node 535d1c3fb1fSNishanth Aravamudan * counters in determining if surplus huge pages can be 536d1c3fb1fSNishanth Aravamudan * converted to normal huge pages in adjust_pool_surplus(). 
A 537d1c3fb1fSNishanth Aravamudan * won't be able to increment the per-node counter, until the 538d1c3fb1fSNishanth Aravamudan * lock is dropped by B, but B doesn't drop hugetlb_lock until 539d1c3fb1fSNishanth Aravamudan * no more huge pages can be converted from surplus to normal 540d1c3fb1fSNishanth Aravamudan * state (and doesn't try to convert again). Thus, we have a 541d1c3fb1fSNishanth Aravamudan * case where a surplus huge page exists, the pool is grown, and 542d1c3fb1fSNishanth Aravamudan * the surplus huge page still exists after, even though it 543d1c3fb1fSNishanth Aravamudan * should just have been converted to a normal huge page. This 544d1c3fb1fSNishanth Aravamudan * does not leak memory, though, as the hugepage will be freed 545d1c3fb1fSNishanth Aravamudan * once it is out of use. It also does not allow the counters to 546d1c3fb1fSNishanth Aravamudan * go out of whack in adjust_pool_surplus() as we don't modify 547d1c3fb1fSNishanth Aravamudan * the node values until we've gotten the hugepage and only the 548d1c3fb1fSNishanth Aravamudan * per-node value is checked there. 549d1c3fb1fSNishanth Aravamudan */ 550d1c3fb1fSNishanth Aravamudan spin_lock(&hugetlb_lock); 551d1c3fb1fSNishanth Aravamudan if (surplus_huge_pages >= nr_overcommit_huge_pages) { 552d1c3fb1fSNishanth Aravamudan spin_unlock(&hugetlb_lock); 553d1c3fb1fSNishanth Aravamudan return NULL; 554d1c3fb1fSNishanth Aravamudan } else { 555d1c3fb1fSNishanth Aravamudan nr_huge_pages++; 556d1c3fb1fSNishanth Aravamudan surplus_huge_pages++; 557d1c3fb1fSNishanth Aravamudan } 558d1c3fb1fSNishanth Aravamudan spin_unlock(&hugetlb_lock); 559d1c3fb1fSNishanth Aravamudan 560551883aeSNishanth Aravamudan page = alloc_pages(htlb_alloc_mask|__GFP_COMP| 561551883aeSNishanth Aravamudan __GFP_REPEAT|__GFP_NOWARN, 5627893d1d5SAdam Litke HUGETLB_PAGE_ORDER); 563d1c3fb1fSNishanth Aravamudan 5647893d1d5SAdam Litke spin_lock(&hugetlb_lock); 565d1c3fb1fSNishanth Aravamudan if (page) { 5662668db91SAdam Litke /* 5672668db91SAdam Litke * This page is now managed by the hugetlb allocator and has 5682668db91SAdam Litke * no users -- drop the buddy allocator's reference. 5692668db91SAdam Litke */ 5702668db91SAdam Litke put_page_testzero(page); 5712668db91SAdam Litke VM_BUG_ON(page_count(page)); 572d1c3fb1fSNishanth Aravamudan nid = page_to_nid(page); 573d1c3fb1fSNishanth Aravamudan set_compound_page_dtor(page, free_huge_page); 574d1c3fb1fSNishanth Aravamudan /* 575d1c3fb1fSNishanth Aravamudan * We incremented the global counters already 576d1c3fb1fSNishanth Aravamudan */ 577d1c3fb1fSNishanth Aravamudan nr_huge_pages_node[nid]++; 578d1c3fb1fSNishanth Aravamudan surplus_huge_pages_node[nid]++; 5793b116300SAdam Litke __count_vm_event(HTLB_BUDDY_PGALLOC); 580d1c3fb1fSNishanth Aravamudan } else { 581d1c3fb1fSNishanth Aravamudan nr_huge_pages--; 582d1c3fb1fSNishanth Aravamudan surplus_huge_pages--; 5833b116300SAdam Litke __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); 5847893d1d5SAdam Litke } 585d1c3fb1fSNishanth Aravamudan spin_unlock(&hugetlb_lock); 5867893d1d5SAdam Litke 5877893d1d5SAdam Litke return page; 5887893d1d5SAdam Litke } 5897893d1d5SAdam Litke 590e4e574b7SAdam Litke /* 591e4e574b7SAdam Litke * Increase the hugetlb pool such that it can accomodate a reservation 592e4e574b7SAdam Litke * of size 'delta'. 
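 * Called with hugetlb_lock held; the lock is dropped while surplus pages
 * are allocated from the buddy allocator and retaken afterwards, which is
 * why 'needed' is recalculated before the reservation is committed.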
593e4e574b7SAdam Litke */ 594e4e574b7SAdam Litke static int gather_surplus_pages(int delta) 595e4e574b7SAdam Litke { 596e4e574b7SAdam Litke struct list_head surplus_list; 597e4e574b7SAdam Litke struct page *page, *tmp; 598e4e574b7SAdam Litke int ret, i; 599e4e574b7SAdam Litke int needed, allocated; 600e4e574b7SAdam Litke 601e4e574b7SAdam Litke needed = (resv_huge_pages + delta) - free_huge_pages; 602ac09b3a1SAdam Litke if (needed <= 0) { 603ac09b3a1SAdam Litke resv_huge_pages += delta; 604e4e574b7SAdam Litke return 0; 605ac09b3a1SAdam Litke } 606e4e574b7SAdam Litke 607e4e574b7SAdam Litke allocated = 0; 608e4e574b7SAdam Litke INIT_LIST_HEAD(&surplus_list); 609e4e574b7SAdam Litke 610e4e574b7SAdam Litke ret = -ENOMEM; 611e4e574b7SAdam Litke retry: 612e4e574b7SAdam Litke spin_unlock(&hugetlb_lock); 613e4e574b7SAdam Litke for (i = 0; i < needed; i++) { 614e4e574b7SAdam Litke page = alloc_buddy_huge_page(NULL, 0); 615e4e574b7SAdam Litke if (!page) { 616e4e574b7SAdam Litke /* 617e4e574b7SAdam Litke * We were not able to allocate enough pages to 618e4e574b7SAdam Litke * satisfy the entire reservation so we free what 619e4e574b7SAdam Litke * we've allocated so far. 620e4e574b7SAdam Litke */ 621e4e574b7SAdam Litke spin_lock(&hugetlb_lock); 622e4e574b7SAdam Litke needed = 0; 623e4e574b7SAdam Litke goto free; 624e4e574b7SAdam Litke } 625e4e574b7SAdam Litke 626e4e574b7SAdam Litke list_add(&page->lru, &surplus_list); 627e4e574b7SAdam Litke } 628e4e574b7SAdam Litke allocated += needed; 629e4e574b7SAdam Litke 630e4e574b7SAdam Litke /* 631e4e574b7SAdam Litke * After retaking hugetlb_lock, we need to recalculate 'needed' 632e4e574b7SAdam Litke * because either resv_huge_pages or free_huge_pages may have changed. 633e4e574b7SAdam Litke */ 634e4e574b7SAdam Litke spin_lock(&hugetlb_lock); 635e4e574b7SAdam Litke needed = (resv_huge_pages + delta) - (free_huge_pages + allocated); 636e4e574b7SAdam Litke if (needed > 0) 637e4e574b7SAdam Litke goto retry; 638e4e574b7SAdam Litke 639e4e574b7SAdam Litke /* 640e4e574b7SAdam Litke * The surplus_list now contains _at_least_ the number of extra pages 641e4e574b7SAdam Litke * needed to accomodate the reservation. Add the appropriate number 642e4e574b7SAdam Litke * of pages to the hugetlb pool and free the extras back to the buddy 643ac09b3a1SAdam Litke * allocator. Commit the entire reservation here to prevent another 644ac09b3a1SAdam Litke * process from stealing the pages as they are added to the pool but 645ac09b3a1SAdam Litke * before they are reserved. 
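 * Since 'needed' is zero or negative at this point, 'needed + allocated'
 * is the shortfall between the current free page count and the reservation
 * target, i.e. how many of the freshly allocated pages must still be added
 * to the pool; the rest are handed back to the buddy allocator below.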
646e4e574b7SAdam Litke */ 647e4e574b7SAdam Litke needed += allocated; 648ac09b3a1SAdam Litke resv_huge_pages += delta; 649e4e574b7SAdam Litke ret = 0; 650e4e574b7SAdam Litke free: 65119fc3f0aSAdam Litke /* Free the needed pages to the hugetlb pool */ 65219fc3f0aSAdam Litke list_for_each_entry_safe(page, tmp, &surplus_list, lru) { 65319fc3f0aSAdam Litke if ((--needed) < 0) 65419fc3f0aSAdam Litke break; 65519fc3f0aSAdam Litke list_del(&page->lru); 65619fc3f0aSAdam Litke enqueue_huge_page(page); 65719fc3f0aSAdam Litke } 65819fc3f0aSAdam Litke 65919fc3f0aSAdam Litke /* Free unnecessary surplus pages to the buddy allocator */ 66019fc3f0aSAdam Litke if (!list_empty(&surplus_list)) { 66119fc3f0aSAdam Litke spin_unlock(&hugetlb_lock); 662e4e574b7SAdam Litke list_for_each_entry_safe(page, tmp, &surplus_list, lru) { 663e4e574b7SAdam Litke list_del(&page->lru); 664af767cbdSAdam Litke /* 6652668db91SAdam Litke * The page has a reference count of zero already, so 6662668db91SAdam Litke * call free_huge_page directly instead of using 6672668db91SAdam Litke * put_page. This must be done with hugetlb_lock 668af767cbdSAdam Litke * unlocked which is safe because free_huge_page takes 669af767cbdSAdam Litke * hugetlb_lock before deciding how to free the page. 670af767cbdSAdam Litke */ 6712668db91SAdam Litke free_huge_page(page); 672af767cbdSAdam Litke } 67319fc3f0aSAdam Litke spin_lock(&hugetlb_lock); 674e4e574b7SAdam Litke } 675e4e574b7SAdam Litke 676e4e574b7SAdam Litke return ret; 677e4e574b7SAdam Litke } 678e4e574b7SAdam Litke 679e4e574b7SAdam Litke /* 680e4e574b7SAdam Litke * When releasing a hugetlb pool reservation, any surplus pages that were 681e4e574b7SAdam Litke * allocated to satisfy the reservation must be explicitly freed if they were 682e4e574b7SAdam Litke * never used. 683e4e574b7SAdam Litke */ 6848cde045cSAdrian Bunk static void return_unused_surplus_pages(unsigned long unused_resv_pages) 685e4e574b7SAdam Litke { 686e4e574b7SAdam Litke static int nid = -1; 687e4e574b7SAdam Litke struct page *page; 688e4e574b7SAdam Litke unsigned long nr_pages; 689e4e574b7SAdam Litke 69011320d17SNishanth Aravamudan /* 69111320d17SNishanth Aravamudan * We want to release as many surplus pages as possible, spread 69211320d17SNishanth Aravamudan * evenly across all nodes. Iterate across all nodes until we 69311320d17SNishanth Aravamudan * can no longer free unreserved surplus pages. This occurs when 69411320d17SNishanth Aravamudan * the nodes with surplus pages have no free pages. 
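 * The iteration count is reset whenever a page is freed, so the loop ends
 * only when the requested number of pages has been freed or a full pass
 * over the online nodes frees nothing.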
69511320d17SNishanth Aravamudan */ 69611320d17SNishanth Aravamudan unsigned long remaining_iterations = num_online_nodes(); 69711320d17SNishanth Aravamudan 698ac09b3a1SAdam Litke /* Uncommit the reservation */ 699ac09b3a1SAdam Litke resv_huge_pages -= unused_resv_pages; 700ac09b3a1SAdam Litke 701e4e574b7SAdam Litke nr_pages = min(unused_resv_pages, surplus_huge_pages); 702e4e574b7SAdam Litke 70311320d17SNishanth Aravamudan while (remaining_iterations-- && nr_pages) { 704e4e574b7SAdam Litke nid = next_node(nid, node_online_map); 705e4e574b7SAdam Litke if (nid == MAX_NUMNODES) 706e4e574b7SAdam Litke nid = first_node(node_online_map); 707e4e574b7SAdam Litke 708e4e574b7SAdam Litke if (!surplus_huge_pages_node[nid]) 709e4e574b7SAdam Litke continue; 710e4e574b7SAdam Litke 711e4e574b7SAdam Litke if (!list_empty(&hugepage_freelists[nid])) { 712e4e574b7SAdam Litke page = list_entry(hugepage_freelists[nid].next, 713e4e574b7SAdam Litke struct page, lru); 714e4e574b7SAdam Litke list_del(&page->lru); 715e4e574b7SAdam Litke update_and_free_page(page); 716e4e574b7SAdam Litke free_huge_pages--; 717e4e574b7SAdam Litke free_huge_pages_node[nid]--; 718e4e574b7SAdam Litke surplus_huge_pages--; 719e4e574b7SAdam Litke surplus_huge_pages_node[nid]--; 720e4e574b7SAdam Litke nr_pages--; 72111320d17SNishanth Aravamudan remaining_iterations = num_online_nodes(); 722e4e574b7SAdam Litke } 723e4e574b7SAdam Litke } 724e4e574b7SAdam Litke } 725e4e574b7SAdam Litke 726c37f9fb1SAndy Whitcroft /* 727c37f9fb1SAndy Whitcroft * Determine if the huge page at addr within the vma has an associated 728c37f9fb1SAndy Whitcroft * reservation. Where it does not we will need to logically increase 729c37f9fb1SAndy Whitcroft * reservation and actually increase quota before an allocation can occur. 730c37f9fb1SAndy Whitcroft * Where any new reservation would be required the reservation change is 731c37f9fb1SAndy Whitcroft * prepared, but not committed. Once the page has been quota'd allocated 732c37f9fb1SAndy Whitcroft * an instantiated the change should be committed via vma_commit_reservation. 733c37f9fb1SAndy Whitcroft * No action is required on failure. 
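 *
 * Returns a positive value when the page still needs to be reserved (and
 * charged to quota), 0 when an existing reservation already covers it, and
 * a negative error if the reservation record could not be allocated.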
734c37f9fb1SAndy Whitcroft */ 735c37f9fb1SAndy Whitcroft static int vma_needs_reservation(struct vm_area_struct *vma, unsigned long addr) 736c37f9fb1SAndy Whitcroft { 737c37f9fb1SAndy Whitcroft struct address_space *mapping = vma->vm_file->f_mapping; 738c37f9fb1SAndy Whitcroft struct inode *inode = mapping->host; 739c37f9fb1SAndy Whitcroft 740c37f9fb1SAndy Whitcroft if (vma->vm_flags & VM_SHARED) { 741c37f9fb1SAndy Whitcroft pgoff_t idx = vma_pagecache_offset(vma, addr); 742c37f9fb1SAndy Whitcroft return region_chg(&inode->i_mapping->private_list, 743c37f9fb1SAndy Whitcroft idx, idx + 1); 744c37f9fb1SAndy Whitcroft 745c37f9fb1SAndy Whitcroft } else { 746c37f9fb1SAndy Whitcroft if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) 747c37f9fb1SAndy Whitcroft return 1; 748c37f9fb1SAndy Whitcroft } 749c37f9fb1SAndy Whitcroft 750c37f9fb1SAndy Whitcroft return 0; 751c37f9fb1SAndy Whitcroft } 752c37f9fb1SAndy Whitcroft static void vma_commit_reservation(struct vm_area_struct *vma, 753c37f9fb1SAndy Whitcroft unsigned long addr) 754c37f9fb1SAndy Whitcroft { 755c37f9fb1SAndy Whitcroft struct address_space *mapping = vma->vm_file->f_mapping; 756c37f9fb1SAndy Whitcroft struct inode *inode = mapping->host; 757c37f9fb1SAndy Whitcroft 758c37f9fb1SAndy Whitcroft if (vma->vm_flags & VM_SHARED) { 759c37f9fb1SAndy Whitcroft pgoff_t idx = vma_pagecache_offset(vma, addr); 760c37f9fb1SAndy Whitcroft region_add(&inode->i_mapping->private_list, idx, idx + 1); 761c37f9fb1SAndy Whitcroft } 762c37f9fb1SAndy Whitcroft } 763c37f9fb1SAndy Whitcroft 764348ea204SAdam Litke static struct page *alloc_huge_page(struct vm_area_struct *vma, 76504f2cbe3SMel Gorman unsigned long addr, int avoid_reserve) 766348ea204SAdam Litke { 767348ea204SAdam Litke struct page *page; 7682fc39cecSAdam Litke struct address_space *mapping = vma->vm_file->f_mapping; 769a1e78772SMel Gorman struct inode *inode = mapping->host; 770c37f9fb1SAndy Whitcroft unsigned int chg; 7712fc39cecSAdam Litke 772a1e78772SMel Gorman /* 773a1e78772SMel Gorman * Processes that did not create the mapping will have no reserves and 774a1e78772SMel Gorman * will not have accounted against quota. Check that the quota can be 775a1e78772SMel Gorman * made before satisfying the allocation 776c37f9fb1SAndy Whitcroft * MAP_NORESERVE mappings may also need pages and quota allocated 777c37f9fb1SAndy Whitcroft * if no reserve mapping overlaps. 
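 *
 * Returns the page on success, or an ERR_PTR() value if the reservation,
 * quota charge or page allocation fails.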
778a1e78772SMel Gorman */ 779c37f9fb1SAndy Whitcroft chg = vma_needs_reservation(vma, addr); 780c37f9fb1SAndy Whitcroft if (chg < 0) 781c37f9fb1SAndy Whitcroft return ERR_PTR(chg); 782c37f9fb1SAndy Whitcroft if (chg) 783a1e78772SMel Gorman if (hugetlb_get_quota(inode->i_mapping, chg)) 784a1e78772SMel Gorman return ERR_PTR(-ENOSPC); 78590d8b7e6SAdam Litke 786a1e78772SMel Gorman spin_lock(&hugetlb_lock); 78704f2cbe3SMel Gorman page = dequeue_huge_page_vma(vma, addr, avoid_reserve); 788a1e78772SMel Gorman spin_unlock(&hugetlb_lock); 789a1e78772SMel Gorman 790a1e78772SMel Gorman if (!page) { 791a1e78772SMel Gorman page = alloc_buddy_huge_page(vma, addr); 792a1e78772SMel Gorman if (!page) { 793a1e78772SMel Gorman hugetlb_put_quota(inode->i_mapping, chg); 794a1e78772SMel Gorman return ERR_PTR(-VM_FAULT_OOM); 795a1e78772SMel Gorman } 796a1e78772SMel Gorman } 797a1e78772SMel Gorman 798348ea204SAdam Litke set_page_refcounted(page); 7992fc39cecSAdam Litke set_page_private(page, (unsigned long) mapping); 800a1e78772SMel Gorman 801c37f9fb1SAndy Whitcroft vma_commit_reservation(vma, addr); 802c37f9fb1SAndy Whitcroft 8037893d1d5SAdam Litke return page; 804b45b5bd6SDavid Gibson } 805b45b5bd6SDavid Gibson 8061da177e4SLinus Torvalds static int __init hugetlb_init(void) 8071da177e4SLinus Torvalds { 8081da177e4SLinus Torvalds unsigned long i; 8091da177e4SLinus Torvalds 8103c726f8dSBenjamin Herrenschmidt if (HPAGE_SHIFT == 0) 8113c726f8dSBenjamin Herrenschmidt return 0; 8123c726f8dSBenjamin Herrenschmidt 8131da177e4SLinus Torvalds for (i = 0; i < MAX_NUMNODES; ++i) 8141da177e4SLinus Torvalds INIT_LIST_HEAD(&hugepage_freelists[i]); 8151da177e4SLinus Torvalds 81663b4613cSNishanth Aravamudan hugetlb_next_nid = first_node(node_online_map); 81763b4613cSNishanth Aravamudan 8181da177e4SLinus Torvalds for (i = 0; i < max_huge_pages; ++i) { 819a482289dSNick Piggin if (!alloc_fresh_huge_page()) 8201da177e4SLinus Torvalds break; 8211da177e4SLinus Torvalds } 8221da177e4SLinus Torvalds max_huge_pages = free_huge_pages = nr_huge_pages = i; 8231da177e4SLinus Torvalds printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages); 8241da177e4SLinus Torvalds return 0; 8251da177e4SLinus Torvalds } 8261da177e4SLinus Torvalds module_init(hugetlb_init); 8271da177e4SLinus Torvalds 8281da177e4SLinus Torvalds static int __init hugetlb_setup(char *s) 8291da177e4SLinus Torvalds { 8301da177e4SLinus Torvalds if (sscanf(s, "%lu", &max_huge_pages) <= 0) 8311da177e4SLinus Torvalds max_huge_pages = 0; 8321da177e4SLinus Torvalds return 1; 8331da177e4SLinus Torvalds } 8341da177e4SLinus Torvalds __setup("hugepages=", hugetlb_setup); 8351da177e4SLinus Torvalds 8368a630112SKen Chen static unsigned int cpuset_mems_nr(unsigned int *array) 8378a630112SKen Chen { 8388a630112SKen Chen int node; 8398a630112SKen Chen unsigned int nr = 0; 8408a630112SKen Chen 8418a630112SKen Chen for_each_node_mask(node, cpuset_current_mems_allowed) 8428a630112SKen Chen nr += array[node]; 8438a630112SKen Chen 8448a630112SKen Chen return nr; 8458a630112SKen Chen } 8468a630112SKen Chen 8471da177e4SLinus Torvalds #ifdef CONFIG_SYSCTL 8481da177e4SLinus Torvalds #ifdef CONFIG_HIGHMEM 8491da177e4SLinus Torvalds static void try_to_free_low(unsigned long count) 8501da177e4SLinus Torvalds { 8514415cc8dSChristoph Lameter int i; 8524415cc8dSChristoph Lameter 8531da177e4SLinus Torvalds for (i = 0; i < MAX_NUMNODES; ++i) { 8541da177e4SLinus Torvalds struct page *page, *next; 8551da177e4SLinus Torvalds list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) { 
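			/* Stop once the pool has shrunk to the requested size. */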
8566b0c880dSAdam Litke if (count >= nr_huge_pages) 8576b0c880dSAdam Litke return; 8581da177e4SLinus Torvalds if (PageHighMem(page)) 8591da177e4SLinus Torvalds continue; 8601da177e4SLinus Torvalds list_del(&page->lru); 8611da177e4SLinus Torvalds update_and_free_page(page); 8621da177e4SLinus Torvalds free_huge_pages--; 8634415cc8dSChristoph Lameter free_huge_pages_node[page_to_nid(page)]--; 8641da177e4SLinus Torvalds } 8651da177e4SLinus Torvalds } 8661da177e4SLinus Torvalds } 8671da177e4SLinus Torvalds #else 8681da177e4SLinus Torvalds static inline void try_to_free_low(unsigned long count) 8691da177e4SLinus Torvalds { 8701da177e4SLinus Torvalds } 8711da177e4SLinus Torvalds #endif 8721da177e4SLinus Torvalds 8737893d1d5SAdam Litke #define persistent_huge_pages (nr_huge_pages - surplus_huge_pages) 8741da177e4SLinus Torvalds static unsigned long set_max_huge_pages(unsigned long count) 8751da177e4SLinus Torvalds { 8767893d1d5SAdam Litke unsigned long min_count, ret; 8771da177e4SLinus Torvalds 8787893d1d5SAdam Litke /* 8797893d1d5SAdam Litke * Increase the pool size 8807893d1d5SAdam Litke * First take pages out of surplus state. Then make up the 8817893d1d5SAdam Litke * remaining difference by allocating fresh huge pages. 882d1c3fb1fSNishanth Aravamudan * 883d1c3fb1fSNishanth Aravamudan * We might race with alloc_buddy_huge_page() here and be unable 884d1c3fb1fSNishanth Aravamudan * to convert a surplus huge page to a normal huge page. That is 885d1c3fb1fSNishanth Aravamudan * not critical, though, it just means the overall size of the 886d1c3fb1fSNishanth Aravamudan * pool might be one hugepage larger than it needs to be, but 887d1c3fb1fSNishanth Aravamudan * within all the constraints specified by the sysctls. 8887893d1d5SAdam Litke */ 8891da177e4SLinus Torvalds spin_lock(&hugetlb_lock); 8907893d1d5SAdam Litke while (surplus_huge_pages && count > persistent_huge_pages) { 8917893d1d5SAdam Litke if (!adjust_pool_surplus(-1)) 8927893d1d5SAdam Litke break; 8937893d1d5SAdam Litke } 8947893d1d5SAdam Litke 8957893d1d5SAdam Litke while (count > persistent_huge_pages) { 8967893d1d5SAdam Litke /* 8977893d1d5SAdam Litke * If this allocation races such that we no longer need the 8987893d1d5SAdam Litke * page, free_huge_page will handle it by freeing the page 8997893d1d5SAdam Litke * and reducing the surplus. 9007893d1d5SAdam Litke */ 9017893d1d5SAdam Litke spin_unlock(&hugetlb_lock); 9027893d1d5SAdam Litke ret = alloc_fresh_huge_page(); 9037893d1d5SAdam Litke spin_lock(&hugetlb_lock); 9047893d1d5SAdam Litke if (!ret) 9057893d1d5SAdam Litke goto out; 9067893d1d5SAdam Litke 9077893d1d5SAdam Litke } 9087893d1d5SAdam Litke 9097893d1d5SAdam Litke /* 9107893d1d5SAdam Litke * Decrease the pool size 9117893d1d5SAdam Litke * First return free pages to the buddy allocator (being careful 9127893d1d5SAdam Litke * to keep enough around to satisfy reservations). Then place 9137893d1d5SAdam Litke * pages into surplus state as needed so the pool will shrink 9147893d1d5SAdam Litke * to the desired size as pages become free. 915d1c3fb1fSNishanth Aravamudan * 916d1c3fb1fSNishanth Aravamudan * By placing pages into the surplus state independent of the 917d1c3fb1fSNishanth Aravamudan * overcommit value, we are allowing the surplus pool size to 918d1c3fb1fSNishanth Aravamudan * exceed overcommit. There are few sane options here. 
Since 919d1c3fb1fSNishanth Aravamudan * alloc_buddy_huge_page() is checking the global counter, 920d1c3fb1fSNishanth Aravamudan * though, we'll note that we're not allowed to exceed surplus 921d1c3fb1fSNishanth Aravamudan * and won't grow the pool anywhere else. Not until one of the 922d1c3fb1fSNishanth Aravamudan * sysctls are changed, or the surplus pages go out of use. 9237893d1d5SAdam Litke */ 9246b0c880dSAdam Litke min_count = resv_huge_pages + nr_huge_pages - free_huge_pages; 9256b0c880dSAdam Litke min_count = max(count, min_count); 9267893d1d5SAdam Litke try_to_free_low(min_count); 9277893d1d5SAdam Litke while (min_count < persistent_huge_pages) { 928348e1e04SNishanth Aravamudan struct page *page = dequeue_huge_page(); 9291da177e4SLinus Torvalds if (!page) 9301da177e4SLinus Torvalds break; 9311da177e4SLinus Torvalds update_and_free_page(page); 9321da177e4SLinus Torvalds } 9337893d1d5SAdam Litke while (count < persistent_huge_pages) { 9347893d1d5SAdam Litke if (!adjust_pool_surplus(1)) 9357893d1d5SAdam Litke break; 9367893d1d5SAdam Litke } 9377893d1d5SAdam Litke out: 9387893d1d5SAdam Litke ret = persistent_huge_pages; 9391da177e4SLinus Torvalds spin_unlock(&hugetlb_lock); 9407893d1d5SAdam Litke return ret; 9411da177e4SLinus Torvalds } 9421da177e4SLinus Torvalds 9431da177e4SLinus Torvalds int hugetlb_sysctl_handler(struct ctl_table *table, int write, 9441da177e4SLinus Torvalds struct file *file, void __user *buffer, 9451da177e4SLinus Torvalds size_t *length, loff_t *ppos) 9461da177e4SLinus Torvalds { 9471da177e4SLinus Torvalds proc_doulongvec_minmax(table, write, file, buffer, length, ppos); 9481da177e4SLinus Torvalds max_huge_pages = set_max_huge_pages(max_huge_pages); 9491da177e4SLinus Torvalds return 0; 9501da177e4SLinus Torvalds } 951396faf03SMel Gorman 952396faf03SMel Gorman int hugetlb_treat_movable_handler(struct ctl_table *table, int write, 953396faf03SMel Gorman struct file *file, void __user *buffer, 954396faf03SMel Gorman size_t *length, loff_t *ppos) 955396faf03SMel Gorman { 956396faf03SMel Gorman proc_dointvec(table, write, file, buffer, length, ppos); 957396faf03SMel Gorman if (hugepages_treat_as_movable) 958396faf03SMel Gorman htlb_alloc_mask = GFP_HIGHUSER_MOVABLE; 959396faf03SMel Gorman else 960396faf03SMel Gorman htlb_alloc_mask = GFP_HIGHUSER; 961396faf03SMel Gorman return 0; 962396faf03SMel Gorman } 963396faf03SMel Gorman 964a3d0c6aaSNishanth Aravamudan int hugetlb_overcommit_handler(struct ctl_table *table, int write, 965a3d0c6aaSNishanth Aravamudan struct file *file, void __user *buffer, 966a3d0c6aaSNishanth Aravamudan size_t *length, loff_t *ppos) 967a3d0c6aaSNishanth Aravamudan { 968a3d0c6aaSNishanth Aravamudan proc_doulongvec_minmax(table, write, file, buffer, length, ppos); 969064d9efeSNishanth Aravamudan spin_lock(&hugetlb_lock); 970064d9efeSNishanth Aravamudan nr_overcommit_huge_pages = sysctl_overcommit_huge_pages; 971a3d0c6aaSNishanth Aravamudan spin_unlock(&hugetlb_lock); 972a3d0c6aaSNishanth Aravamudan return 0; 973a3d0c6aaSNishanth Aravamudan } 974a3d0c6aaSNishanth Aravamudan 9751da177e4SLinus Torvalds #endif /* CONFIG_SYSCTL */ 9761da177e4SLinus Torvalds 9771da177e4SLinus Torvalds int hugetlb_report_meminfo(char *buf) 9781da177e4SLinus Torvalds { 9791da177e4SLinus Torvalds return sprintf(buf, 9801da177e4SLinus Torvalds "HugePages_Total: %5lu\n" 9811da177e4SLinus Torvalds "HugePages_Free: %5lu\n" 982b45b5bd6SDavid Gibson "HugePages_Rsvd: %5lu\n" 9837893d1d5SAdam Litke "HugePages_Surp: %5lu\n" 9841da177e4SLinus Torvalds "Hugepagesize: %5lu kB\n", 
9851da177e4SLinus Torvalds nr_huge_pages, 9861da177e4SLinus Torvalds free_huge_pages, 987a43a8c39SChen, Kenneth W resv_huge_pages, 9887893d1d5SAdam Litke surplus_huge_pages, 9891da177e4SLinus Torvalds HPAGE_SIZE/1024); 9901da177e4SLinus Torvalds } 9911da177e4SLinus Torvalds 9921da177e4SLinus Torvalds int hugetlb_report_node_meminfo(int nid, char *buf) 9931da177e4SLinus Torvalds { 9941da177e4SLinus Torvalds return sprintf(buf, 9951da177e4SLinus Torvalds "Node %d HugePages_Total: %5u\n" 996a1de0919SNishanth Aravamudan "Node %d HugePages_Free: %5u\n" 997a1de0919SNishanth Aravamudan "Node %d HugePages_Surp: %5u\n", 9981da177e4SLinus Torvalds nid, nr_huge_pages_node[nid], 999a1de0919SNishanth Aravamudan nid, free_huge_pages_node[nid], 1000a1de0919SNishanth Aravamudan nid, surplus_huge_pages_node[nid]); 10011da177e4SLinus Torvalds } 10021da177e4SLinus Torvalds 10031da177e4SLinus Torvalds /* Return the number pages of memory we physically have, in PAGE_SIZE units. */ 10041da177e4SLinus Torvalds unsigned long hugetlb_total_pages(void) 10051da177e4SLinus Torvalds { 10061da177e4SLinus Torvalds return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE); 10071da177e4SLinus Torvalds } 10081da177e4SLinus Torvalds 1009fc1b8a73SMel Gorman static int hugetlb_acct_memory(long delta) 1010fc1b8a73SMel Gorman { 1011fc1b8a73SMel Gorman int ret = -ENOMEM; 1012fc1b8a73SMel Gorman 1013fc1b8a73SMel Gorman spin_lock(&hugetlb_lock); 1014fc1b8a73SMel Gorman /* 1015fc1b8a73SMel Gorman * When cpuset is configured, it breaks the strict hugetlb page 1016fc1b8a73SMel Gorman * reservation as the accounting is done on a global variable. Such 1017fc1b8a73SMel Gorman * reservation is completely rubbish in the presence of cpuset because 1018fc1b8a73SMel Gorman * the reservation is not checked against page availability for the 1019fc1b8a73SMel Gorman * current cpuset. Application can still potentially OOM'ed by kernel 1020fc1b8a73SMel Gorman * with lack of free htlb page in cpuset that the task is in. 1021fc1b8a73SMel Gorman * Attempt to enforce strict accounting with cpuset is almost 1022fc1b8a73SMel Gorman * impossible (or too ugly) because cpuset is too fluid that 1023fc1b8a73SMel Gorman * task or memory node can be dynamically moved between cpusets. 1024fc1b8a73SMel Gorman * 1025fc1b8a73SMel Gorman * The change of semantics for shared hugetlb mapping with cpuset is 1026fc1b8a73SMel Gorman * undesirable. However, in order to preserve some of the semantics, 1027fc1b8a73SMel Gorman * we fall back to check against current free page availability as 1028fc1b8a73SMel Gorman * a best attempt and hopefully to minimize the impact of changing 1029fc1b8a73SMel Gorman * semantics that cpuset has. 
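 *
 * For a positive delta the surplus pool is grown first via
 * gather_surplus_pages() and the request is then checked against the free
 * huge pages visible to the current cpuset; a negative delta simply
 * returns unused surplus pages.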
1030fc1b8a73SMel Gorman */ 1031fc1b8a73SMel Gorman if (delta > 0) { 1032fc1b8a73SMel Gorman if (gather_surplus_pages(delta) < 0) 1033fc1b8a73SMel Gorman goto out; 1034fc1b8a73SMel Gorman 1035fc1b8a73SMel Gorman if (delta > cpuset_mems_nr(free_huge_pages_node)) { 1036fc1b8a73SMel Gorman return_unused_surplus_pages(delta); 1037fc1b8a73SMel Gorman goto out; 1038fc1b8a73SMel Gorman } 1039fc1b8a73SMel Gorman } 1040fc1b8a73SMel Gorman 1041fc1b8a73SMel Gorman ret = 0; 1042fc1b8a73SMel Gorman if (delta < 0) 1043fc1b8a73SMel Gorman return_unused_surplus_pages((unsigned long) -delta); 1044fc1b8a73SMel Gorman 1045fc1b8a73SMel Gorman out: 1046fc1b8a73SMel Gorman spin_unlock(&hugetlb_lock); 1047fc1b8a73SMel Gorman return ret; 1048fc1b8a73SMel Gorman } 1049fc1b8a73SMel Gorman 1050a1e78772SMel Gorman static void hugetlb_vm_op_close(struct vm_area_struct *vma) 1051a1e78772SMel Gorman { 1052a1e78772SMel Gorman unsigned long reserve = vma_resv_huge_pages(vma); 1053a1e78772SMel Gorman if (reserve) 1054a1e78772SMel Gorman hugetlb_acct_memory(-reserve); 1055a1e78772SMel Gorman } 1056a1e78772SMel Gorman 10571da177e4SLinus Torvalds /* 10581da177e4SLinus Torvalds * We cannot handle pagefaults against hugetlb pages at all. They cause 10591da177e4SLinus Torvalds * handle_mm_fault() to try to instantiate regular-sized pages in the 10601da177e4SLinus Torvalds * hugegpage VMA. do_page_fault() is supposed to trap this, so BUG is we get 10611da177e4SLinus Torvalds * this far. 10621da177e4SLinus Torvalds */ 1063d0217ac0SNick Piggin static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 10641da177e4SLinus Torvalds { 10651da177e4SLinus Torvalds BUG(); 1066d0217ac0SNick Piggin return 0; 10671da177e4SLinus Torvalds } 10681da177e4SLinus Torvalds 10691da177e4SLinus Torvalds struct vm_operations_struct hugetlb_vm_ops = { 1070d0217ac0SNick Piggin .fault = hugetlb_vm_op_fault, 1071a1e78772SMel Gorman .close = hugetlb_vm_op_close, 10721da177e4SLinus Torvalds }; 10731da177e4SLinus Torvalds 10741e8f889bSDavid Gibson static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, 10751e8f889bSDavid Gibson int writable) 107663551ae0SDavid Gibson { 107763551ae0SDavid Gibson pte_t entry; 107863551ae0SDavid Gibson 10791e8f889bSDavid Gibson if (writable) { 108063551ae0SDavid Gibson entry = 108163551ae0SDavid Gibson pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))); 108263551ae0SDavid Gibson } else { 10837f2e9525SGerald Schaefer entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot)); 108463551ae0SDavid Gibson } 108563551ae0SDavid Gibson entry = pte_mkyoung(entry); 108663551ae0SDavid Gibson entry = pte_mkhuge(entry); 108763551ae0SDavid Gibson 108863551ae0SDavid Gibson return entry; 108963551ae0SDavid Gibson } 109063551ae0SDavid Gibson 10911e8f889bSDavid Gibson static void set_huge_ptep_writable(struct vm_area_struct *vma, 10921e8f889bSDavid Gibson unsigned long address, pte_t *ptep) 10931e8f889bSDavid Gibson { 10941e8f889bSDavid Gibson pte_t entry; 10951e8f889bSDavid Gibson 10967f2e9525SGerald Schaefer entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep))); 10977f2e9525SGerald Schaefer if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) { 10981e8f889bSDavid Gibson update_mmu_cache(vma, address, entry); 10991e8f889bSDavid Gibson } 11008dab5241SBenjamin Herrenschmidt } 11011e8f889bSDavid Gibson 11021e8f889bSDavid Gibson 110363551ae0SDavid Gibson int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 110463551ae0SDavid Gibson struct vm_area_struct *vma) 
110563551ae0SDavid Gibson { 110663551ae0SDavid Gibson pte_t *src_pte, *dst_pte, entry; 110763551ae0SDavid Gibson struct page *ptepage; 11081c59827dSHugh Dickins unsigned long addr; 11091e8f889bSDavid Gibson int cow; 11101e8f889bSDavid Gibson 11111e8f889bSDavid Gibson cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; 111263551ae0SDavid Gibson 11131c59827dSHugh Dickins for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) { 1114c74df32cSHugh Dickins src_pte = huge_pte_offset(src, addr); 1115c74df32cSHugh Dickins if (!src_pte) 1116c74df32cSHugh Dickins continue; 111763551ae0SDavid Gibson dst_pte = huge_pte_alloc(dst, addr); 111863551ae0SDavid Gibson if (!dst_pte) 111963551ae0SDavid Gibson goto nomem; 1120c5c99429SLarry Woodman 1121c5c99429SLarry Woodman /* If the pagetables are shared don't copy or take references */ 1122c5c99429SLarry Woodman if (dst_pte == src_pte) 1123c5c99429SLarry Woodman continue; 1124c5c99429SLarry Woodman 1125c74df32cSHugh Dickins spin_lock(&dst->page_table_lock); 112646478758SNick Piggin spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING); 11277f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(src_pte))) { 11281e8f889bSDavid Gibson if (cow) 11297f2e9525SGerald Schaefer huge_ptep_set_wrprotect(src, addr, src_pte); 11307f2e9525SGerald Schaefer entry = huge_ptep_get(src_pte); 113163551ae0SDavid Gibson ptepage = pte_page(entry); 113263551ae0SDavid Gibson get_page(ptepage); 113363551ae0SDavid Gibson set_huge_pte_at(dst, addr, dst_pte, entry); 11341c59827dSHugh Dickins } 11351c59827dSHugh Dickins spin_unlock(&src->page_table_lock); 1136c74df32cSHugh Dickins spin_unlock(&dst->page_table_lock); 113763551ae0SDavid Gibson } 113863551ae0SDavid Gibson return 0; 113963551ae0SDavid Gibson 114063551ae0SDavid Gibson nomem: 114163551ae0SDavid Gibson return -ENOMEM; 114263551ae0SDavid Gibson } 114363551ae0SDavid Gibson 1144502717f4SChen, Kenneth W void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 114504f2cbe3SMel Gorman unsigned long end, struct page *ref_page) 114663551ae0SDavid Gibson { 114763551ae0SDavid Gibson struct mm_struct *mm = vma->vm_mm; 114863551ae0SDavid Gibson unsigned long address; 1149c7546f8fSDavid Gibson pte_t *ptep; 115063551ae0SDavid Gibson pte_t pte; 115163551ae0SDavid Gibson struct page *page; 1152fe1668aeSChen, Kenneth W struct page *tmp; 1153c0a499c2SChen, Kenneth W /* 1154c0a499c2SChen, Kenneth W * A page gathering list, protected by per file i_mmap_lock. The 1155c0a499c2SChen, Kenneth W * lock is used to avoid list corruption from multiple unmapping 1156c0a499c2SChen, Kenneth W * of the same page since we are using page->lru. 
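 * The gathered pages are only released (put_page) after all the ptes have
 * been cleared and the TLB has been flushed for the whole range.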
1157c0a499c2SChen, Kenneth W */ 1158fe1668aeSChen, Kenneth W LIST_HEAD(page_list); 115963551ae0SDavid Gibson 116063551ae0SDavid Gibson WARN_ON(!is_vm_hugetlb_page(vma)); 116163551ae0SDavid Gibson BUG_ON(start & ~HPAGE_MASK); 116263551ae0SDavid Gibson BUG_ON(end & ~HPAGE_MASK); 116363551ae0SDavid Gibson 1164508034a3SHugh Dickins spin_lock(&mm->page_table_lock); 116563551ae0SDavid Gibson for (address = start; address < end; address += HPAGE_SIZE) { 1166c7546f8fSDavid Gibson ptep = huge_pte_offset(mm, address); 1167c7546f8fSDavid Gibson if (!ptep) 1168c7546f8fSDavid Gibson continue; 1169c7546f8fSDavid Gibson 117039dde65cSChen, Kenneth W if (huge_pmd_unshare(mm, &address, ptep)) 117139dde65cSChen, Kenneth W continue; 117239dde65cSChen, Kenneth W 117304f2cbe3SMel Gorman /* 117404f2cbe3SMel Gorman * If a reference page is supplied, it is because a specific 117504f2cbe3SMel Gorman * page is being unmapped, not a range. Ensure the page we 117604f2cbe3SMel Gorman * are about to unmap is the actual page of interest. 117704f2cbe3SMel Gorman */ 117804f2cbe3SMel Gorman if (ref_page) { 117904f2cbe3SMel Gorman pte = huge_ptep_get(ptep); 118004f2cbe3SMel Gorman if (huge_pte_none(pte)) 118104f2cbe3SMel Gorman continue; 118204f2cbe3SMel Gorman page = pte_page(pte); 118304f2cbe3SMel Gorman if (page != ref_page) 118404f2cbe3SMel Gorman continue; 118504f2cbe3SMel Gorman 118604f2cbe3SMel Gorman /* 118704f2cbe3SMel Gorman * Mark the VMA as having unmapped its page so that 118804f2cbe3SMel Gorman * future faults in this VMA will fail rather than 118904f2cbe3SMel Gorman * looking like data was lost 119004f2cbe3SMel Gorman */ 119104f2cbe3SMel Gorman set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); 119204f2cbe3SMel Gorman } 119304f2cbe3SMel Gorman 1194c7546f8fSDavid Gibson pte = huge_ptep_get_and_clear(mm, address, ptep); 11957f2e9525SGerald Schaefer if (huge_pte_none(pte)) 119663551ae0SDavid Gibson continue; 1197c7546f8fSDavid Gibson 119863551ae0SDavid Gibson page = pte_page(pte); 11996649a386SKen Chen if (pte_dirty(pte)) 12006649a386SKen Chen set_page_dirty(page); 1201fe1668aeSChen, Kenneth W list_add(&page->lru, &page_list); 120263551ae0SDavid Gibson } 12031da177e4SLinus Torvalds spin_unlock(&mm->page_table_lock); 1204508034a3SHugh Dickins flush_tlb_range(vma, start, end); 1205fe1668aeSChen, Kenneth W list_for_each_entry_safe(page, tmp, &page_list, lru) { 1206fe1668aeSChen, Kenneth W list_del(&page->lru); 1207fe1668aeSChen, Kenneth W put_page(page); 1208fe1668aeSChen, Kenneth W } 12091da177e4SLinus Torvalds } 121063551ae0SDavid Gibson 1211502717f4SChen, Kenneth W void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 121204f2cbe3SMel Gorman unsigned long end, struct page *ref_page) 1213502717f4SChen, Kenneth W { 1214502717f4SChen, Kenneth W /* 1215502717f4SChen, Kenneth W * It is undesirable to test vma->vm_file as it should be non-null 1216502717f4SChen, Kenneth W * for valid hugetlb area. However, vm_file will be NULL in the error 1217502717f4SChen, Kenneth W * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails, 1218502717f4SChen, Kenneth W * do_mmap_pgoff() nullifies vma->vm_file before calling this function 1219502717f4SChen, Kenneth W * to clean up. Since no pte has actually been setup, it is safe to 1220502717f4SChen, Kenneth W * do nothing in this case. 
1221502717f4SChen, Kenneth W */ 1222502717f4SChen, Kenneth W if (vma->vm_file) { 1223502717f4SChen, Kenneth W spin_lock(&vma->vm_file->f_mapping->i_mmap_lock); 122404f2cbe3SMel Gorman __unmap_hugepage_range(vma, start, end, ref_page); 1225502717f4SChen, Kenneth W spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock); 1226502717f4SChen, Kenneth W } 1227502717f4SChen, Kenneth W } 1228502717f4SChen, Kenneth W 122904f2cbe3SMel Gorman /* 123004f2cbe3SMel Gorman * This is called when the original mapper is failing to COW a MAP_PRIVATE 123104f2cbe3SMel Gorman * mapping it owns the reserve page for. The intention is to unmap the page 123204f2cbe3SMel Gorman * from other VMAs and let the children be SIGKILLed if they are faulting the 123304f2cbe3SMel Gorman * same region. 123404f2cbe3SMel Gorman */ 123504f2cbe3SMel Gorman int unmap_ref_private(struct mm_struct *mm, 123604f2cbe3SMel Gorman struct vm_area_struct *vma, 123704f2cbe3SMel Gorman struct page *page, 123804f2cbe3SMel Gorman unsigned long address) 123904f2cbe3SMel Gorman { 124004f2cbe3SMel Gorman struct vm_area_struct *iter_vma; 124104f2cbe3SMel Gorman struct address_space *mapping; 124204f2cbe3SMel Gorman struct prio_tree_iter iter; 124304f2cbe3SMel Gorman pgoff_t pgoff; 124404f2cbe3SMel Gorman 124504f2cbe3SMel Gorman /* 124604f2cbe3SMel Gorman * vm_pgoff is in PAGE_SIZE units, hence the different calculation 124704f2cbe3SMel Gorman * from page cache lookup which is in HPAGE_SIZE units. 124804f2cbe3SMel Gorman */ 124904f2cbe3SMel Gorman address = address & huge_page_mask(hstate_vma(vma)); 125004f2cbe3SMel Gorman pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) 125104f2cbe3SMel Gorman + (vma->vm_pgoff >> PAGE_SHIFT); 125204f2cbe3SMel Gorman mapping = (struct address_space *)page_private(page); 125304f2cbe3SMel Gorman 125404f2cbe3SMel Gorman vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 125504f2cbe3SMel Gorman /* Do not unmap the current VMA */ 125604f2cbe3SMel Gorman if (iter_vma == vma) 125704f2cbe3SMel Gorman continue; 125804f2cbe3SMel Gorman 125904f2cbe3SMel Gorman /* 126004f2cbe3SMel Gorman * Unmap the page from other VMAs without their own reserves. 126104f2cbe3SMel Gorman * They get marked to be SIGKILLed if they fault in these 126204f2cbe3SMel Gorman * areas. This is because a future no-page fault on this VMA 126304f2cbe3SMel Gorman * could insert a zeroed page instead of the data existing 126404f2cbe3SMel Gorman * from the time of fork. This would look like data corruption 126504f2cbe3SMel Gorman */ 126604f2cbe3SMel Gorman if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) 126704f2cbe3SMel Gorman unmap_hugepage_range(iter_vma, 126804f2cbe3SMel Gorman address, address + HPAGE_SIZE, 126904f2cbe3SMel Gorman page); 127004f2cbe3SMel Gorman } 127104f2cbe3SMel Gorman 127204f2cbe3SMel Gorman return 1; 127304f2cbe3SMel Gorman } 127404f2cbe3SMel Gorman 12751e8f889bSDavid Gibson static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, 127604f2cbe3SMel Gorman unsigned long address, pte_t *ptep, pte_t pte, 127704f2cbe3SMel Gorman struct page *pagecache_page) 12781e8f889bSDavid Gibson { 12791e8f889bSDavid Gibson struct page *old_page, *new_page; 128079ac6ba4SDavid Gibson int avoidcopy; 128104f2cbe3SMel Gorman int outside_reserve = 0; 12821e8f889bSDavid Gibson 12831e8f889bSDavid Gibson old_page = pte_page(pte); 12841e8f889bSDavid Gibson 128504f2cbe3SMel Gorman retry_avoidcopy: 12861e8f889bSDavid Gibson /* If no-one else is actually using this page, avoid the copy 12871e8f889bSDavid Gibson * and just make the page writable */ 12881e8f889bSDavid Gibson avoidcopy = (page_count(old_page) == 1); 12891e8f889bSDavid Gibson if (avoidcopy) { 12901e8f889bSDavid Gibson set_huge_ptep_writable(vma, address, ptep); 129183c54070SNick Piggin return 0; 12921e8f889bSDavid Gibson } 12931e8f889bSDavid Gibson 129404f2cbe3SMel Gorman /* 129504f2cbe3SMel Gorman * If the process that created a MAP_PRIVATE mapping is about to 129604f2cbe3SMel Gorman * perform a COW due to a shared page count, attempt to satisfy 129704f2cbe3SMel Gorman * the allocation without using the existing reserves. The pagecache 129804f2cbe3SMel Gorman * page is used to determine if the reserve at this address was 129904f2cbe3SMel Gorman * consumed or not. If reserves were used, a partial faulted mapping 130004f2cbe3SMel Gorman * at the time of fork() could consume its reserves on COW instead 130104f2cbe3SMel Gorman * of the full address range. 130204f2cbe3SMel Gorman */ 130304f2cbe3SMel Gorman if (!(vma->vm_flags & VM_SHARED) && 130404f2cbe3SMel Gorman is_vma_resv_set(vma, HPAGE_RESV_OWNER) && 130504f2cbe3SMel Gorman old_page != pagecache_page) 130604f2cbe3SMel Gorman outside_reserve = 1; 130704f2cbe3SMel Gorman 13081e8f889bSDavid Gibson page_cache_get(old_page); 130904f2cbe3SMel Gorman new_page = alloc_huge_page(vma, address, outside_reserve); 13101e8f889bSDavid Gibson 13112fc39cecSAdam Litke if (IS_ERR(new_page)) { 13121e8f889bSDavid Gibson page_cache_release(old_page); 131304f2cbe3SMel Gorman 131404f2cbe3SMel Gorman /* 131504f2cbe3SMel Gorman * If a process owning a MAP_PRIVATE mapping fails to COW, 131604f2cbe3SMel Gorman * it is due to references held by a child and an insufficient 131704f2cbe3SMel Gorman * huge page pool. To guarantee the original mapper's 131804f2cbe3SMel Gorman * reliability, unmap the page from child processes. The child 131904f2cbe3SMel Gorman * may get SIGKILLed if it later faults.
132004f2cbe3SMel Gorman */ 132104f2cbe3SMel Gorman if (outside_reserve) { 132204f2cbe3SMel Gorman BUG_ON(huge_pte_none(pte)); 132304f2cbe3SMel Gorman if (unmap_ref_private(mm, vma, old_page, address)) { 132404f2cbe3SMel Gorman BUG_ON(page_count(old_page) != 1); 132504f2cbe3SMel Gorman BUG_ON(huge_pte_none(pte)); 132604f2cbe3SMel Gorman goto retry_avoidcopy; 132704f2cbe3SMel Gorman } 132804f2cbe3SMel Gorman WARN_ON_ONCE(1); 132904f2cbe3SMel Gorman } 133004f2cbe3SMel Gorman 13312fc39cecSAdam Litke return -PTR_ERR(new_page); 13321e8f889bSDavid Gibson } 13331e8f889bSDavid Gibson 13341e8f889bSDavid Gibson spin_unlock(&mm->page_table_lock); 13359de455b2SAtsushi Nemoto copy_huge_page(new_page, old_page, address, vma); 13360ed361deSNick Piggin __SetPageUptodate(new_page); 13371e8f889bSDavid Gibson spin_lock(&mm->page_table_lock); 13381e8f889bSDavid Gibson 13391e8f889bSDavid Gibson ptep = huge_pte_offset(mm, address & HPAGE_MASK); 13407f2e9525SGerald Schaefer if (likely(pte_same(huge_ptep_get(ptep), pte))) { 13411e8f889bSDavid Gibson /* Break COW */ 13428fe627ecSGerald Schaefer huge_ptep_clear_flush(vma, address, ptep); 13431e8f889bSDavid Gibson set_huge_pte_at(mm, address, ptep, 13441e8f889bSDavid Gibson make_huge_pte(vma, new_page, 1)); 13451e8f889bSDavid Gibson /* Make the old page be freed below */ 13461e8f889bSDavid Gibson new_page = old_page; 13471e8f889bSDavid Gibson } 13481e8f889bSDavid Gibson page_cache_release(new_page); 13491e8f889bSDavid Gibson page_cache_release(old_page); 135083c54070SNick Piggin return 0; 13511e8f889bSDavid Gibson } 13521e8f889bSDavid Gibson 135304f2cbe3SMel Gorman /* Return the pagecache page at a given address within a VMA */ 135404f2cbe3SMel Gorman static struct page *hugetlbfs_pagecache_page(struct vm_area_struct *vma, 135504f2cbe3SMel Gorman unsigned long address) 135604f2cbe3SMel Gorman { 135704f2cbe3SMel Gorman struct address_space *mapping; 1358e7c4b0bfSAndy Whitcroft pgoff_t idx; 135904f2cbe3SMel Gorman 136004f2cbe3SMel Gorman mapping = vma->vm_file->f_mapping; 1361e7c4b0bfSAndy Whitcroft idx = vma_pagecache_offset(vma, address); 136204f2cbe3SMel Gorman 136304f2cbe3SMel Gorman return find_lock_page(mapping, idx); 136404f2cbe3SMel Gorman } 136504f2cbe3SMel Gorman 1366a1ed3ddaSRobert P. J. Day static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, 13671e8f889bSDavid Gibson unsigned long address, pte_t *ptep, int write_access) 1368ac9b9c66SHugh Dickins { 1369ac9b9c66SHugh Dickins int ret = VM_FAULT_SIGBUS; 1370e7c4b0bfSAndy Whitcroft pgoff_t idx; 13714c887265SAdam Litke unsigned long size; 13724c887265SAdam Litke struct page *page; 13734c887265SAdam Litke struct address_space *mapping; 13741e8f889bSDavid Gibson pte_t new_pte; 13754c887265SAdam Litke 137604f2cbe3SMel Gorman /* 137704f2cbe3SMel Gorman * Currently, we are forced to kill the process in the event the 137804f2cbe3SMel Gorman * original mapper has unmapped pages from the child due to a failed 137904f2cbe3SMel Gorman * COW. 
Warn that such a situation has occurred as it may not be obvious 138004f2cbe3SMel Gorman */ 138104f2cbe3SMel Gorman if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { 138204f2cbe3SMel Gorman printk(KERN_WARNING 138304f2cbe3SMel Gorman "PID %d killed due to inadequate hugepage pool\n", 138404f2cbe3SMel Gorman current->pid); 138504f2cbe3SMel Gorman return ret; 138604f2cbe3SMel Gorman } 138704f2cbe3SMel Gorman 13884c887265SAdam Litke mapping = vma->vm_file->f_mapping; 1389e7c4b0bfSAndy Whitcroft idx = vma_pagecache_offset(vma, address); 13904c887265SAdam Litke 13914c887265SAdam Litke /* 13924c887265SAdam Litke * Use page lock to guard against racing truncation 13934c887265SAdam Litke * before we get page_table_lock. 13944c887265SAdam Litke */ 13956bda666aSChristoph Lameter retry: 13966bda666aSChristoph Lameter page = find_lock_page(mapping, idx); 13976bda666aSChristoph Lameter if (!page) { 1398ebed4bfcSHugh Dickins size = i_size_read(mapping->host) >> HPAGE_SHIFT; 1399ebed4bfcSHugh Dickins if (idx >= size) 1400ebed4bfcSHugh Dickins goto out; 140104f2cbe3SMel Gorman page = alloc_huge_page(vma, address, 0); 14022fc39cecSAdam Litke if (IS_ERR(page)) { 14032fc39cecSAdam Litke ret = -PTR_ERR(page); 14046bda666aSChristoph Lameter goto out; 14056bda666aSChristoph Lameter } 140679ac6ba4SDavid Gibson clear_huge_page(page, address); 14070ed361deSNick Piggin __SetPageUptodate(page); 1408ac9b9c66SHugh Dickins 14096bda666aSChristoph Lameter if (vma->vm_flags & VM_SHARED) { 14106bda666aSChristoph Lameter int err; 141145c682a6SKen Chen struct inode *inode = mapping->host; 14126bda666aSChristoph Lameter 14136bda666aSChristoph Lameter err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); 14146bda666aSChristoph Lameter if (err) { 14156bda666aSChristoph Lameter put_page(page); 14166bda666aSChristoph Lameter if (err == -EEXIST) 14176bda666aSChristoph Lameter goto retry; 14186bda666aSChristoph Lameter goto out; 14196bda666aSChristoph Lameter } 142045c682a6SKen Chen 142145c682a6SKen Chen spin_lock(&inode->i_lock); 142245c682a6SKen Chen inode->i_blocks += BLOCKS_PER_HUGEPAGE; 142345c682a6SKen Chen spin_unlock(&inode->i_lock); 14246bda666aSChristoph Lameter } else 14256bda666aSChristoph Lameter lock_page(page); 14266bda666aSChristoph Lameter } 14271e8f889bSDavid Gibson 1428ac9b9c66SHugh Dickins spin_lock(&mm->page_table_lock); 14294c887265SAdam Litke size = i_size_read(mapping->host) >> HPAGE_SHIFT; 14304c887265SAdam Litke if (idx >= size) 14314c887265SAdam Litke goto backout; 14324c887265SAdam Litke 143383c54070SNick Piggin ret = 0; 14347f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(ptep))) 14354c887265SAdam Litke goto backout; 14364c887265SAdam Litke 14371e8f889bSDavid Gibson new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) 14381e8f889bSDavid Gibson && (vma->vm_flags & VM_SHARED))); 14391e8f889bSDavid Gibson set_huge_pte_at(mm, address, ptep, new_pte); 14401e8f889bSDavid Gibson 14411e8f889bSDavid Gibson if (write_access && !(vma->vm_flags & VM_SHARED)) { 14421e8f889bSDavid Gibson /* Optimization, do the COW without a second fault */ 144304f2cbe3SMel Gorman ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page); 14441e8f889bSDavid Gibson } 14451e8f889bSDavid Gibson 1446ac9b9c66SHugh Dickins spin_unlock(&mm->page_table_lock); 14474c887265SAdam Litke unlock_page(page); 14484c887265SAdam Litke out: 1449ac9b9c66SHugh Dickins return ret; 14504c887265SAdam Litke 14514c887265SAdam Litke backout: 14524c887265SAdam Litke spin_unlock(&mm->page_table_lock); 14534c887265SAdam Litke unlock_page(page); 14544c887265SAdam Litke put_page(page); 14554c887265SAdam Litke goto out; 1456ac9b9c66SHugh Dickins } 1457ac9b9c66SHugh Dickins 145886e5216fSAdam Litke int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, 145986e5216fSAdam Litke unsigned long address, int write_access) 146086e5216fSAdam Litke { 146186e5216fSAdam Litke pte_t *ptep; 146286e5216fSAdam Litke pte_t entry; 14631e8f889bSDavid Gibson int ret; 14643935baa9SDavid Gibson static DEFINE_MUTEX(hugetlb_instantiation_mutex); 146586e5216fSAdam Litke 146686e5216fSAdam Litke ptep = huge_pte_alloc(mm, address); 146786e5216fSAdam Litke if (!ptep) 146886e5216fSAdam Litke return VM_FAULT_OOM; 146986e5216fSAdam Litke 14703935baa9SDavid Gibson /* 14713935baa9SDavid Gibson * Serialize hugepage allocation and instantiation, so that we don't 14723935baa9SDavid Gibson * get spurious allocation failures if two CPUs race to instantiate 14733935baa9SDavid Gibson * the same page in the page cache. 14743935baa9SDavid Gibson */ 14753935baa9SDavid Gibson mutex_lock(&hugetlb_instantiation_mutex); 14767f2e9525SGerald Schaefer entry = huge_ptep_get(ptep); 14777f2e9525SGerald Schaefer if (huge_pte_none(entry)) { 14783935baa9SDavid Gibson ret = hugetlb_no_page(mm, vma, address, ptep, write_access); 14793935baa9SDavid Gibson mutex_unlock(&hugetlb_instantiation_mutex); 14803935baa9SDavid Gibson return ret; 14813935baa9SDavid Gibson } 148286e5216fSAdam Litke 148383c54070SNick Piggin ret = 0; 14841e8f889bSDavid Gibson 14851e8f889bSDavid Gibson spin_lock(&mm->page_table_lock); 14861e8f889bSDavid Gibson /* Check for a racing update before calling hugetlb_cow */ 14877f2e9525SGerald Schaefer if (likely(pte_same(entry, huge_ptep_get(ptep)))) 148804f2cbe3SMel Gorman if (write_access && !pte_write(entry)) { 148904f2cbe3SMel Gorman struct page *page; 149004f2cbe3SMel Gorman page = hugetlbfs_pagecache_page(vma, address); 149104f2cbe3SMel Gorman ret = hugetlb_cow(mm, vma, address, ptep, entry, page); 149204f2cbe3SMel Gorman if (page) { 149304f2cbe3SMel Gorman unlock_page(page); 149404f2cbe3SMel Gorman put_page(page); 149504f2cbe3SMel Gorman } 149604f2cbe3SMel Gorman } 14971e8f889bSDavid Gibson spin_unlock(&mm->page_table_lock); 14983935baa9SDavid Gibson mutex_unlock(&hugetlb_instantiation_mutex); 14991e8f889bSDavid Gibson 15001e8f889bSDavid Gibson return ret; 150186e5216fSAdam Litke } 150286e5216fSAdam Litke 150363551ae0SDavid Gibson int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, 150463551ae0SDavid Gibson struct page **pages, struct vm_area_struct **vmas, 15055b23dbe8SAdam Litke unsigned long *position, int *length, int i, 15065b23dbe8SAdam Litke int write) 150763551ae0SDavid Gibson { 1508d5d4b0aaSChen, Kenneth W unsigned long pfn_offset; 1509d5d4b0aaSChen, Kenneth W unsigned long vaddr = *position; 151063551ae0SDavid Gibson int remainder = *length; 151163551ae0SDavid Gibson 15121c59827dSHugh Dickins spin_lock(&mm->page_table_lock); 151363551ae0SDavid Gibson while (vaddr < vma->vm_end && remainder) { 151463551ae0SDavid Gibson pte_t *pte; 151563551ae0SDavid Gibson struct page *page; 151663551ae0SDavid Gibson 15174c887265SAdam Litke /* 15184c887265SAdam Litke * Some archs (sparc64, sh*) have multiple pte_ts to 15194c887265SAdam Litke * each hugepage. We have to make sure we get the 15204c887265SAdam Litke * first, for the page indexing below to work.
15214c887265SAdam Litke */ 152263551ae0SDavid Gibson pte = huge_pte_offset(mm, vaddr & HPAGE_MASK); 152363551ae0SDavid Gibson 15247f2e9525SGerald Schaefer if (!pte || huge_pte_none(huge_ptep_get(pte)) || 15257f2e9525SGerald Schaefer (write && !pte_write(huge_ptep_get(pte)))) { 15264c887265SAdam Litke int ret; 15274c887265SAdam Litke 15284c887265SAdam Litke spin_unlock(&mm->page_table_lock); 15295b23dbe8SAdam Litke ret = hugetlb_fault(mm, vma, vaddr, write); 15304c887265SAdam Litke spin_lock(&mm->page_table_lock); 1531a89182c7SAdam Litke if (!(ret & VM_FAULT_ERROR)) 15324c887265SAdam Litke continue; 15334c887265SAdam Litke 15341c59827dSHugh Dickins remainder = 0; 15351c59827dSHugh Dickins if (!i) 15361c59827dSHugh Dickins i = -EFAULT; 15371c59827dSHugh Dickins break; 15381c59827dSHugh Dickins } 153963551ae0SDavid Gibson 1540d5d4b0aaSChen, Kenneth W pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT; 15417f2e9525SGerald Schaefer page = pte_page(huge_ptep_get(pte)); 1542d5d4b0aaSChen, Kenneth W same_page: 1543d6692183SChen, Kenneth W if (pages) { 154463551ae0SDavid Gibson get_page(page); 1545d5d4b0aaSChen, Kenneth W pages[i] = page + pfn_offset; 1546d6692183SChen, Kenneth W } 154763551ae0SDavid Gibson 154863551ae0SDavid Gibson if (vmas) 154963551ae0SDavid Gibson vmas[i] = vma; 155063551ae0SDavid Gibson 155163551ae0SDavid Gibson vaddr += PAGE_SIZE; 1552d5d4b0aaSChen, Kenneth W ++pfn_offset; 155363551ae0SDavid Gibson --remainder; 155463551ae0SDavid Gibson ++i; 1555d5d4b0aaSChen, Kenneth W if (vaddr < vma->vm_end && remainder && 1556d5d4b0aaSChen, Kenneth W pfn_offset < HPAGE_SIZE/PAGE_SIZE) { 1557d5d4b0aaSChen, Kenneth W /* 1558d5d4b0aaSChen, Kenneth W * We use pfn_offset to avoid touching the pageframes 1559d5d4b0aaSChen, Kenneth W * of this compound page. 
1560d5d4b0aaSChen, Kenneth W */ 1561d5d4b0aaSChen, Kenneth W goto same_page; 1562d5d4b0aaSChen, Kenneth W } 156363551ae0SDavid Gibson } 15641c59827dSHugh Dickins spin_unlock(&mm->page_table_lock); 156563551ae0SDavid Gibson *length = remainder; 156663551ae0SDavid Gibson *position = vaddr; 156763551ae0SDavid Gibson 156863551ae0SDavid Gibson return i; 156963551ae0SDavid Gibson } 15708f860591SZhang, Yanmin 15718f860591SZhang, Yanmin void hugetlb_change_protection(struct vm_area_struct *vma, 15728f860591SZhang, Yanmin unsigned long address, unsigned long end, pgprot_t newprot) 15738f860591SZhang, Yanmin { 15748f860591SZhang, Yanmin struct mm_struct *mm = vma->vm_mm; 15758f860591SZhang, Yanmin unsigned long start = address; 15768f860591SZhang, Yanmin pte_t *ptep; 15778f860591SZhang, Yanmin pte_t pte; 15788f860591SZhang, Yanmin 15798f860591SZhang, Yanmin BUG_ON(address >= end); 15808f860591SZhang, Yanmin flush_cache_range(vma, address, end); 15818f860591SZhang, Yanmin 158239dde65cSChen, Kenneth W spin_lock(&vma->vm_file->f_mapping->i_mmap_lock); 15838f860591SZhang, Yanmin spin_lock(&mm->page_table_lock); 15848f860591SZhang, Yanmin for (; address < end; address += HPAGE_SIZE) { 15858f860591SZhang, Yanmin ptep = huge_pte_offset(mm, address); 15868f860591SZhang, Yanmin if (!ptep) 15878f860591SZhang, Yanmin continue; 158839dde65cSChen, Kenneth W if (huge_pmd_unshare(mm, &address, ptep)) 158939dde65cSChen, Kenneth W continue; 15907f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(ptep))) { 15918f860591SZhang, Yanmin pte = huge_ptep_get_and_clear(mm, address, ptep); 15928f860591SZhang, Yanmin pte = pte_mkhuge(pte_modify(pte, newprot)); 15938f860591SZhang, Yanmin set_huge_pte_at(mm, address, ptep, pte); 15948f860591SZhang, Yanmin } 15958f860591SZhang, Yanmin } 15968f860591SZhang, Yanmin spin_unlock(&mm->page_table_lock); 159739dde65cSChen, Kenneth W spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock); 15988f860591SZhang, Yanmin 15998f860591SZhang, Yanmin flush_tlb_range(vma, start, end); 16008f860591SZhang, Yanmin } 16018f860591SZhang, Yanmin 1602a1e78772SMel Gorman int hugetlb_reserve_pages(struct inode *inode, 1603a1e78772SMel Gorman long from, long to, 1604a1e78772SMel Gorman struct vm_area_struct *vma) 1605e4e574b7SAdam Litke { 1606e4e574b7SAdam Litke long ret, chg; 1607e4e574b7SAdam Litke 1608c37f9fb1SAndy Whitcroft if (vma && vma->vm_flags & VM_NORESERVE) 1609c37f9fb1SAndy Whitcroft return 0; 1610c37f9fb1SAndy Whitcroft 1611a1e78772SMel Gorman /* 1612a1e78772SMel Gorman * Shared mappings base their reservation on the number of pages that 1613a1e78772SMel Gorman * are already allocated on behalf of the file. Private mappings need 1614a1e78772SMel Gorman * to reserve the full area even if read-only as mprotect() may be 1615a1e78772SMel Gorman * called to make the mapping read-write. 
Assume !vma is a shm mapping 1616a1e78772SMel Gorman */ 1617a1e78772SMel Gorman if (!vma || vma->vm_flags & VM_SHARED) 1618e4e574b7SAdam Litke chg = region_chg(&inode->i_mapping->private_list, from, to); 1619a1e78772SMel Gorman else { 1620a1e78772SMel Gorman chg = to - from; 1621a1e78772SMel Gorman set_vma_resv_huge_pages(vma, chg); 162204f2cbe3SMel Gorman set_vma_resv_flags(vma, HPAGE_RESV_OWNER); 1623a1e78772SMel Gorman } 1624a1e78772SMel Gorman 1625e4e574b7SAdam Litke if (chg < 0) 1626e4e574b7SAdam Litke return chg; 16278a630112SKen Chen 162890d8b7e6SAdam Litke if (hugetlb_get_quota(inode->i_mapping, chg)) 162990d8b7e6SAdam Litke return -ENOSPC; 1630a43a8c39SChen, Kenneth W ret = hugetlb_acct_memory(chg); 163168842c9bSKen Chen if (ret < 0) { 163268842c9bSKen Chen hugetlb_put_quota(inode->i_mapping, chg); 1633a43a8c39SChen, Kenneth W return ret; 163468842c9bSKen Chen } 1635a1e78772SMel Gorman if (!vma || vma->vm_flags & VM_SHARED) 1636a43a8c39SChen, Kenneth W region_add(&inode->i_mapping->private_list, from, to); 1637a43a8c39SChen, Kenneth W return 0; 1638a43a8c39SChen, Kenneth W } 1639a43a8c39SChen, Kenneth W 1640a43a8c39SChen, Kenneth W void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) 1641a43a8c39SChen, Kenneth W { 1642a43a8c39SChen, Kenneth W long chg = region_truncate(&inode->i_mapping->private_list, offset); 164345c682a6SKen Chen 164445c682a6SKen Chen spin_lock(&inode->i_lock); 164545c682a6SKen Chen inode->i_blocks -= BLOCKS_PER_HUGEPAGE * freed; 164645c682a6SKen Chen spin_unlock(&inode->i_lock); 164745c682a6SKen Chen 164890d8b7e6SAdam Litke hugetlb_put_quota(inode->i_mapping, (chg - freed)); 164990d8b7e6SAdam Litke hugetlb_acct_memory(-(chg - freed)); 1650a43a8c39SChen, Kenneth W } 1651
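/*
 * Illustrative sketch, not part of the original file: roughly how a caller
 * (for example a hugetlbfs-style ->mmap path) might pair the reservation
 * entry points above.  The wrapper name and its offset arithmetic are
 * assumptions for illustration only.  The from/to arguments are in huge
 * pages, matching the region_chg()/region_add() bookkeeping, and vm_pgoff
 * is in PAGE_SIZE units as noted in unmap_ref_private().
 */
static int example_hugetlb_file_mmap(struct inode *inode,
				     struct vm_area_struct *vma)
{
	long from = vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT);
	long to = from + ((vma->vm_end - vma->vm_start) >> HPAGE_SHIFT);
	int ret;

	/*
	 * Charge the whole range up front: shared mappings record it in the
	 * inode's region list, private mappings stash the count in the VMA
	 * and are marked HPAGE_RESV_OWNER.
	 */
	ret = hugetlb_reserve_pages(inode, from, to, vma);
	if (ret < 0)
		return ret;	/* -ENOMEM, or -ENOSPC if over quota */

	/*
	 * On truncation or final unlink the filesystem would later call
	 * hugetlb_unreserve_pages(inode, offset, freed) to give back the
	 * unused reservation and quota.
	 */
	return 0;
}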