/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
static unsigned long surplus_huge_pages;
static unsigned long nr_overcommit_huge_pages;
unsigned long max_huge_pages;
unsigned long sysctl_overcommit_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;
static int hugetlb_next_nid;

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page(void)
{
	int nid;
	struct page *page = NULL;

	for (nid = 0; nid < MAX_NUMNODES; ++nid) {
		if (!list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			break;
		}
	}
	return page;
}

static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid;
	struct page *page = NULL;
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct zonelist *zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol, &nodemask);
	struct zone *zone;
	struct zoneref *z;

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						MAX_NR_ZONES - 1, nodemask) {
		nid = zone_to_nid(zone);
		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
		    !list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			if (vma && vma->vm_flags & VM_MAYSHARE)
				resv_huge_pages--;
			break;
		}
	}
	mpol_cond_put(mpol);
	return page;
}

static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	arch_release_hugepage(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

static void free_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	struct address_space *mapping;

	mapping = (struct address_space *) page_private(page);
	set_page_private(page, 0);
	BUG_ON(page_count(page));
	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages_node[nid]) {
		update_and_free_page(page);
		surplus_huge_pages--;
		surplus_huge_pages_node[nid]--;
	} else {
		enqueue_huge_page(page);
	}
	spin_unlock(&hugetlb_lock);
	if (mapping)
		hugetlb_put_quota(mapping, 1);
}

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(int delta)
{
	static int prev_nid;
	int nid = prev_nid;
	int ret = 0;

	VM_BUG_ON(delta != -1 && delta != 1);
	do {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		/* To shrink on this node, there must be a surplus page */
		if (delta < 0 && !surplus_huge_pages_node[nid])
			continue;
		/* Surplus cannot exceed the total number of pages */
		if (delta > 0 && surplus_huge_pages_node[nid] >=
						nr_huge_pages_node[nid])
			continue;

		surplus_huge_pages += delta;
		surplus_huge_pages_node[nid] += delta;
		ret = 1;
		break;
	} while (nid != prev_nid);

	prev_nid = nid;
	return ret;
}

static struct page *alloc_fresh_huge_page_node(int nid)
{
	struct page *page;

	page = alloc_pages_node(nid,
		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
						__GFP_REPEAT|__GFP_NOWARN,
		HUGETLB_PAGE_ORDER);
	if (page) {
		if (arch_prepare_hugepage(page)) {
			__free_pages(page, HUGETLB_PAGE_ORDER);
			return NULL;
		}
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[nid]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
	}

	return page;
}

static int alloc_fresh_huge_page(void)
{
	struct page *page;
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hugetlb_next_nid;

	do {
		page = alloc_fresh_huge_page_node(hugetlb_next_nid);
		if (page)
			ret = 1;
		/*
		 * Use a helper variable to find the next node and then
		 * copy it back to hugetlb_next_nid afterwards:
		 * otherwise there's a window in which a racer might
		 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
		 * But we don't need to use a spin_lock here: it really
		 * doesn't matter if occasionally a racer chooses the
		 * same nid as we do.  Move nid forward in the mask even
		 * if we just successfully allocated a hugepage so that
		 * the next caller gets hugepages on the next node.
		 */
		next_nid = next_node(hugetlb_next_nid, node_online_map);
		if (next_nid == MAX_NUMNODES)
			next_nid = first_node(node_online_map);
		hugetlb_next_nid = next_nid;
	} while (!page && hugetlb_next_nid != start_nid);

	if (ret)
		count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return ret;
}

static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
						unsigned long address)
{
	struct page *page;
	unsigned int nid;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A. B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus().  A
	 * won't be able to increment the per-node counter, until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again). Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page. This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use. It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
	 */
	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages >= nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		nr_huge_pages++;
		surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
					__GFP_REPEAT|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);

	spin_lock(&hugetlb_lock);
	if (page) {
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		put_page_testzero(page);
		VM_BUG_ON(page_count(page));
		nid = page_to_nid(page);
		set_compound_page_dtor(page, free_huge_page);
		/*
		 * We incremented the global counters already
		 */
		nr_huge_pages_node[nid]++;
		surplus_huge_pages_node[nid]++;
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	} else {
		nr_huge_pages--;
		surplus_huge_pages--;
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
	}
	spin_unlock(&hugetlb_lock);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;

	needed = (resv_huge_pages + delta) - free_huge_pages;
	if (needed <= 0) {
		resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(NULL, 0);
		if (!page) {
			/*
			 * We were not able to allocate enough pages to
			 * satisfy the entire reservation so we free what
			 * we've allocated so far.
			 */
			spin_lock(&hugetlb_lock);
			needed = 0;
			goto free;
		}

		list_add(&page->lru, &surplus_list);
	}
	allocated += needed;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (resv_huge_pages + delta) - (free_huge_pages + allocated);
	if (needed > 0)
		goto retry;

	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.  Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	resv_huge_pages += delta;
	ret = 0;
free:
	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		list_del(&page->lru);
		enqueue_huge_page(page);
	}

	/* Free unnecessary surplus pages to the buddy allocator */
	if (!list_empty(&surplus_list)) {
		spin_unlock(&hugetlb_lock);
		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
			list_del(&page->lru);
			/*
			 * The page has a reference count of zero already, so
			 * call free_huge_page directly instead of using
			 * put_page.  This must be done with hugetlb_lock
			 * unlocked which is safe because free_huge_page takes
			 * hugetlb_lock before deciding how to free the page.
			 */
			free_huge_page(page);
		}
		spin_lock(&hugetlb_lock);
	}

	return ret;
}

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they were
 * never used.
 */
static void return_unused_surplus_pages(unsigned long unused_resv_pages)
{
	static int nid = -1;
	struct page *page;
	unsigned long nr_pages;

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes. Iterate across all nodes until we
	 * can no longer free unreserved surplus pages. This occurs when
	 * the nodes with surplus pages have no free pages.
	 */
	unsigned long remaining_iterations = num_online_nodes();

	/* Uncommit the reservation */
	resv_huge_pages -= unused_resv_pages;

	nr_pages = min(unused_resv_pages, surplus_huge_pages);

	while (remaining_iterations-- && nr_pages) {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		if (!surplus_huge_pages_node[nid])
			continue;

		if (!list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			surplus_huge_pages--;
			surplus_huge_pages_node[nid]--;
			nr_pages--;
			remaining_iterations = num_online_nodes();
		}
	}
}


static struct page *alloc_huge_page_shared(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_vma(vma, addr);
	spin_unlock(&hugetlb_lock);
	return page ? page : ERR_PTR(-VM_FAULT_OOM);
}

static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct page *page = NULL;

	if (hugetlb_get_quota(vma->vm_file->f_mapping, 1))
		return ERR_PTR(-VM_FAULT_SIGBUS);

	spin_lock(&hugetlb_lock);
	if (free_huge_pages > resv_huge_pages)
		page = dequeue_huge_page_vma(vma, addr);
	spin_unlock(&hugetlb_lock);
	if (!page) {
		page = alloc_buddy_huge_page(vma, addr);
		if (!page) {
			hugetlb_put_quota(vma->vm_file->f_mapping, 1);
			return ERR_PTR(-VM_FAULT_OOM);
		}
	}
	return page;
}

static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	struct page *page;
	struct address_space *mapping = vma->vm_file->f_mapping;

	if (vma->vm_flags & VM_MAYSHARE)
		page = alloc_huge_page_shared(vma, addr);
	else
		page = alloc_huge_page_private(vma, addr);

	if (!IS_ERR(page)) {
		set_page_refcounted(page);
		set_page_private(page, (unsigned long) mapping);
	}
	return page;
}

static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	hugetlb_next_nid = first_node(node_online_map);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (count >= nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

#define persistent_huge_pages (nr_huge_pages - surplus_huge_pages)
static unsigned long set_max_huge_pages(unsigned long count)
{
	unsigned long min_count, ret;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_buddy_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	spin_lock(&hugetlb_lock);
	while (surplus_huge_pages && count > persistent_huge_pages) {
		if (!adjust_pool_surplus(-1))
			break;
	}

	while (count > persistent_huge_pages) {
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);
		ret = alloc_fresh_huge_page();
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;

	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit. There are few sane options here. Since
	 * alloc_buddy_huge_page() is checking the global counter,
	 * though, we'll note that we're not allowed to exceed surplus
	 * and won't grow the pool anywhere else. Not until one of the
	 * sysctls are changed, or the surplus pages go out of use.
	 */
	min_count = resv_huge_pages + nr_huge_pages - free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(min_count);
	while (min_count < persistent_huge_pages) {
		struct page *page = dequeue_huge_page();
		if (!page)
			break;
		update_and_free_page(page);
	}
	while (count < persistent_huge_pages) {
		if (!adjust_pool_surplus(1))
			break;
	}
out:
	ret = persistent_huge_pages;
	spin_unlock(&hugetlb_lock);
	return ret;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (hugepages_treat_as_movable)
		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
	else
		htlb_alloc_mask = GFP_HIGHUSER;
	return 0;
}

int hugetlb_overcommit_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	spin_lock(&hugetlb_lock);
	nr_overcommit_huge_pages = sysctl_overcommit_huge_pages;
	spin_unlock(&hugetlb_lock);
	return 0;
}

#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"HugePages_Surp:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			resv_huge_pages,
			surplus_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n"
		"Node %d HugePages_Surp:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid],
		nid, surplus_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

static int hugetlb_acct_memory(long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	/*
	 * When cpuset is configured, it breaks the strict hugetlb page
	 * reservation as the accounting is done on a global variable. Such
	 * reservation is completely rubbish in the presence of cpuset because
	 * the reservation is not checked against page availability for the
	 * current cpuset. The application can still potentially be OOM'ed by
	 * the kernel for lack of free hugetlb pages in the cpuset that the
	 * task is in. Attempting to enforce strict accounting with cpuset is
	 * almost impossible (or too ugly) because cpuset is so fluid that a
	 * task or memory node can be dynamically moved between cpusets.
	 *
	 * The change of semantics for shared hugetlb mapping with cpuset is
	 * undesirable. However, in order to preserve some of the semantics,
	 * we fall back to checking against current free page availability as
	 * a best attempt and hopefully to minimize the impact of changing
	 * semantics that cpuset has.
	 */
	if (delta > 0) {
		if (gather_surplus_pages(delta) < 0)
			goto out;

		if (delta > cpuset_mems_nr(free_huge_pages_node)) {
			return_unused_surplus_pages(delta);
			goto out;
		}
	}

	ret = 0;
	if (delta < 0)
		return_unused_surplus_pages((unsigned long) -delta);

out:
	spin_unlock(&hugetlb_lock);
	return ret;
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
 * this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
};

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
	}
}


int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;

		/* If the pagetables are shared don't copy or take references */
		if (dst_pte == src_pte)
			continue;

		spin_lock(&dst->page_table_lock);
		spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
		if (!huge_pte_none(huge_ptep_get(src_pte))) {
			if (cow)
				huge_ptep_set_wrprotect(src, addr, src_pte);
			entry = huge_ptep_get(src_pte);
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	/*
	 * A page gathering list, protected by per file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (huge_pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	/*
	 * It is undesirable to test vma->vm_file as it should be non-null
	 * for valid hugetlb area. However, vm_file will be NULL in the error
	 * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails,
	 * do_mmap_pgoff() nullifies vma->vm_file before calling this function
	 * to clean up. Since no pte has actually been setup, it is safe to
	 * do nothing in this case.
	 */
	if (vma->vm_file) {
		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
		__unmap_hugepage_range(vma, start, end);
		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
	}
}

static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (IS_ERR(new_page)) {
		page_cache_release(old_page);
		return -PTR_ERR(new_page);
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	__SetPageUptodate(new_page);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(huge_ptep_get(ptep), pte))) {
		/* Break COW */
		huge_ptep_clear_flush(vma, address, ptep);
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return 0;
}

static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
		if (idx >= size)
			goto out;
		page = alloc_huge_page(vma, address);
		if (IS_ERR(page)) {
			ret = -PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, address);
		__SetPageUptodate(page);

		if (vma->vm_flags & VM_SHARED) {
			int err;
			struct inode *inode = mapping->host;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}

			spin_lock(&inode->i_lock);
			inode->i_blocks += BLOCKS_PER_HUGEPAGE;
			spin_unlock(&inode->i_lock);
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!huge_pte_none(huge_ptep_get(ptep)))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
	put_page(page);
	goto out;
}

int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = huge_ptep_get(ptep);
	if (huge_pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		mutex_unlock(&hugetlb_instantiation_mutex);
		return ret;
	}

	ret = 0;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, huge_ptep_get(ptep))))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}

int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i,
			int write)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || huge_pte_none(huge_ptep_get(pte)) ||
		    (write && !pte_write(huge_ptep_get(pte)))) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, write);
			spin_lock(&mm->page_table_lock);
			if (!(ret & VM_FAULT_ERROR))
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
		page = pte_page(huge_ptep_get(pte));
same_page:
		if (pages) {
			get_page(page);
			pages[i] = page + pfn_offset;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;
		if (!huge_pte_none(huge_ptep_get(ptep))) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
		}
	}
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

	flush_tlb_range(vma, start, end);
}

struct file_region {
	struct list_head link;
	long from;
	long to;
};
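
/*
 * Illustrative sketch, not part of the original file: the regions above
 * form the per-inode reservation map, a sorted list of non-overlapping
 * [from, to) ranges (in units of huge pages) hung off
 * inode->i_mapping->private_list.  The hypothetical helper below simply
 * walks the list and totals the pages it covers, which is the quantity
 * that region_add()/region_chg() below maintain.
 */
static inline long example_region_count(struct list_head *head)
{
	struct file_region *rg;
	long pages = 0;

	list_for_each_entry(rg, head, link)
		pages += rg->to - rg->from;
	return pages;
}
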
static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher, then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle: allocate a new region at the position, but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area; if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}
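
/*
 * Illustrative sketch, not part of the original file: a worked example of
 * how region_chg() and region_add() pair up.  Suppose the map already holds
 * [0, 2) and [5, 8) and a new mapping needs pages [1, 6).  region_chg()
 * reports that only pages 2, 3 and 4 are new (a charge of 3); once that
 * charge has been accounted, region_add() with the same range merges
 * everything into a single [0, 8) region.  The hypothetical driver below
 * assumes a caller-owned list head, mirroring the hugetlb_reserve_pages()
 * pattern further down.
 */
static int example_region_usage(struct list_head *head)
{
	long chg;

	chg = region_chg(head, 1, 6);	/* -> 3 with the map described above */
	if (chg < 0)
		return chg;
	/* ... charge quota / hugetlb_acct_memory() for 'chg' pages here ... */
	region_add(head, 1, 6);		/* map is now the single region [0, 8) */
	return 0;
}
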
static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

int hugetlb_reserve_pages(struct inode *inode, long from, long to)
{
	long ret, chg;

	chg = region_chg(&inode->i_mapping->private_list, from, to);
	if (chg < 0)
		return chg;

	if (hugetlb_get_quota(inode->i_mapping, chg))
		return -ENOSPC;
	ret = hugetlb_acct_memory(chg);
	if (ret < 0) {
		hugetlb_put_quota(inode->i_mapping, chg);
		return ret;
	}
	region_add(&inode->i_mapping->private_list, from, to);
	return 0;
}

void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	long chg = region_truncate(&inode->i_mapping->private_list, offset);

	spin_lock(&inode->i_lock);
	inode->i_blocks -= BLOCKS_PER_HUGEPAGE * freed;
	spin_unlock(&inode->i_lock);

	hugetlb_put_quota(inode->i_mapping, (chg - freed));
	hugetlb_acct_memory(-(chg - freed));
}
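
/*
 * Illustrative sketch, not part of the original file: roughly how a
 * hugetlbfs-style caller is expected to use hugetlb_reserve_pages().  In
 * the real kernel the reservation is made from hugetlbfs_file_mmap() for
 * shared mappings, and hugetlb_unreserve_pages() is later called from the
 * truncate path with the number of pages it actually freed.  The helper
 * below is hypothetical and only shows the unit conversion and ordering.
 */
static int example_hugetlbfs_mmap_reserve(struct inode *inode,
					   struct vm_area_struct *vma)
{
	/* convert the mapping to huge-page-sized units */
	long from = vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT);
	long to = from + ((vma->vm_end - vma->vm_start) >> HPAGE_SHIFT);

	/* reserve [from, to) before any fault can dip into the free pool */
	return hugetlb_reserve_pages(inode, from, to);
}
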