/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
static unsigned long surplus_huge_pages;
static unsigned long nr_overcommit_huge_pages;
unsigned long max_huge_pages;
unsigned long sysctl_overcommit_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;
static int hugetlb_next_nid;

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid;
	struct page *page = NULL;
	struct mempolicy *mpol;
	struct zonelist *zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol);
	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		nid = zone_to_nid(*z);
		if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
		    !list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			if (vma && vma->vm_flags & VM_MAYSHARE)
				resv_huge_pages--;
			break;
		}
	}
	mpol_free(mpol);	/* unref if mpol !NULL */
	return page;
}

static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

static void free_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	struct address_space *mapping;

	mapping = (struct address_space *) page_private(page);
	set_page_private(page, 0);
	BUG_ON(page_count(page));
	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages_node[nid]) {
		update_and_free_page(page);
		surplus_huge_pages--;
		surplus_huge_pages_node[nid]--;
	} else {
		enqueue_huge_page(page);
	}
	spin_unlock(&hugetlb_lock);
	if (mapping)
		hugetlb_put_quota(mapping, 1);
}

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(int delta)
{
	static int prev_nid;
	int nid = prev_nid;
	int ret = 0;

	VM_BUG_ON(delta != -1 && delta != 1);
	do {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		/* To shrink on this node, there must be a surplus page */
		if (delta < 0 && !surplus_huge_pages_node[nid])
			continue;
		/* Surplus cannot exceed the total number of pages */
		if (delta > 0 && surplus_huge_pages_node[nid] >=
						nr_huge_pages_node[nid])
			continue;

		surplus_huge_pages += delta;
		surplus_huge_pages_node[nid] += delta;
		ret = 1;
		break;
	} while (nid != prev_nid);

	prev_nid = nid;
	return ret;
}

static struct page *alloc_fresh_huge_page_node(int nid)
{
	struct page *page;

	page = alloc_pages_node(nid,
		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|__GFP_NOWARN,
		HUGETLB_PAGE_ORDER);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[nid]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
	}

	return page;
}

static int alloc_fresh_huge_page(void)
{
	struct page *page;
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hugetlb_next_nid;

	do {
		page = alloc_fresh_huge_page_node(hugetlb_next_nid);
		if (page)
			ret = 1;
		/*
		 * Use a helper variable to find the next node and then
		 * copy it back to hugetlb_next_nid afterwards:
		 * otherwise there's a window in which a racer might
		 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
		 * But we don't need to use a spin_lock here: it really
		 * doesn't matter if occasionally a racer chooses the
		 * same nid as we do.  Move nid forward in the mask even
		 * if we just successfully allocated a hugepage so that
		 * the next caller gets hugepages on the next node.
		 */
		next_nid = next_node(hugetlb_next_nid, node_online_map);
		if (next_nid == MAX_NUMNODES)
			next_nid = first_node(node_online_map);
		hugetlb_next_nid = next_nid;
	} while (!page && hugetlb_next_nid != start_nid);

	return ret;
}
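/*
 * Illustration of the round-robin interleaving used by
 * alloc_fresh_huge_page() above: with nodes 0-2 online and every
 * allocation succeeding, successive calls place fresh huge pages on node
 * 0, then 1, then 2, then 0 again.  If a node's allocation fails, the
 * loop keeps advancing through the remaining online nodes and only
 * returns 0 once every node has been tried without success.
 */
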
static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
						unsigned long address)
{
	struct page *page;
	unsigned int nid;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A. B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus(). A
	 * won't be able to increment the per-node counter, until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again). Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page. This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use. It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
	 */
	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages >= nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		nr_huge_pages++;
		surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);

	spin_lock(&hugetlb_lock);
	if (page) {
		nid = page_to_nid(page);
		set_compound_page_dtor(page, free_huge_page);
		/*
		 * We incremented the global counters already
		 */
		nr_huge_pages_node[nid]++;
		surplus_huge_pages_node[nid]++;
	} else {
		nr_huge_pages--;
		surplus_huge_pages--;
	}
	spin_unlock(&hugetlb_lock);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;

	needed = (resv_huge_pages + delta) - free_huge_pages;
	if (needed <= 0) {
		resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(NULL, 0);
		if (!page) {
			/*
			 * We were not able to allocate enough pages to
			 * satisfy the entire reservation so we free what
			 * we've allocated so far.
			 */
			spin_lock(&hugetlb_lock);
			needed = 0;
			goto free;
		}

		list_add(&page->lru, &surplus_list);
	}
	allocated += needed;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (resv_huge_pages + delta) - (free_huge_pages + allocated);
	if (needed > 0)
		goto retry;

	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation. Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator. Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	resv_huge_pages += delta;
	ret = 0;
free:
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		list_del(&page->lru);
		if ((--needed) >= 0)
			enqueue_huge_page(page);
		else {
			/*
			 * Decrement the refcount and free the page using its
			 * destructor. This must be done with hugetlb_lock
			 * unlocked which is safe because free_huge_page takes
			 * hugetlb_lock before deciding how to free the page.
			 */
			spin_unlock(&hugetlb_lock);
			put_page(page);
			spin_lock(&hugetlb_lock);
		}
	}

	return ret;
}

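/*
 * Worked example of the arithmetic in gather_surplus_pages() above: with
 * resv_huge_pages = 10, free_huge_pages = 2 and delta = 4, the first pass
 * computes needed = (10 + 4) - 2 = 12 and allocates that many surplus
 * pages with hugetlb_lock dropped.  If other tasks freed pages in the
 * meantime, the recalculation after retaking the lock comes out lower
 * (possibly negative), and any pages beyond what the reservation still
 * needs are handed straight back through free_huge_page().
 */
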
/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they were
 * never used.
 */
static void return_unused_surplus_pages(unsigned long unused_resv_pages)
{
	static int nid = -1;
	struct page *page;
	unsigned long nr_pages;

	/* Uncommit the reservation */
	resv_huge_pages -= unused_resv_pages;

	nr_pages = min(unused_resv_pages, surplus_huge_pages);

	while (nr_pages) {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		if (!surplus_huge_pages_node[nid])
			continue;

		if (!list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			surplus_huge_pages--;
			surplus_huge_pages_node[nid]--;
			nr_pages--;
		}
	}
}


static struct page *alloc_huge_page_shared(struct vm_area_struct *vma,
					   unsigned long addr)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page(vma, addr);
	spin_unlock(&hugetlb_lock);
	return page ? page : ERR_PTR(-VM_FAULT_OOM);
}

static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
					    unsigned long addr)
{
	struct page *page = NULL;

	if (hugetlb_get_quota(vma->vm_file->f_mapping, 1))
		return ERR_PTR(-VM_FAULT_SIGBUS);

	spin_lock(&hugetlb_lock);
	if (free_huge_pages > resv_huge_pages)
		page = dequeue_huge_page(vma, addr);
	spin_unlock(&hugetlb_lock);
	if (!page) {
		page = alloc_buddy_huge_page(vma, addr);
		if (!page) {
			hugetlb_put_quota(vma->vm_file->f_mapping, 1);
			return ERR_PTR(-VM_FAULT_OOM);
		}
	}
	return page;
}

static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	struct page *page;
	struct address_space *mapping = vma->vm_file->f_mapping;

	if (vma->vm_flags & VM_MAYSHARE)
		page = alloc_huge_page_shared(vma, addr);
	else
		page = alloc_huge_page_private(vma, addr);

	if (!IS_ERR(page)) {
		set_page_refcounted(page);
		set_page_private(page, (unsigned long) mapping);
	}
	return page;
}

static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	hugetlb_next_nid = first_node(node_online_map);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

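/*
 * cpuset_mems_nr() sums a per-node counter over the nodes allowed to the
 * current task's cpuset.  For example, cpuset_mems_nr(free_huge_pages_node)
 * in hugetlb_acct_memory() yields the number of free huge pages that are
 * actually visible to the current cpuset, rather than the global total.
 */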
static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (count >= nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

#define persistent_huge_pages (nr_huge_pages - surplus_huge_pages)
static unsigned long set_max_huge_pages(unsigned long count)
{
	unsigned long min_count, ret;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_buddy_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	spin_lock(&hugetlb_lock);
	while (surplus_huge_pages && count > persistent_huge_pages) {
		if (!adjust_pool_surplus(-1))
			break;
	}

	while (count > persistent_huge_pages) {
		int ret;
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);
		ret = alloc_fresh_huge_page();
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;

	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit. There are few sane options here. Since
	 * alloc_buddy_huge_page() is checking the global counter,
	 * though, we'll note that we're not allowed to exceed surplus
	 * and won't grow the pool anywhere else. Not until one of the
	 * sysctls are changed, or the surplus pages go out of use.
	 */
	min_count = resv_huge_pages + nr_huge_pages - free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(min_count);
	while (min_count < persistent_huge_pages) {
		struct page *page = dequeue_huge_page(NULL, 0);
		if (!page)
			break;
		update_and_free_page(page);
	}
	while (count < persistent_huge_pages) {
		if (!adjust_pool_surplus(1))
			break;
	}
out:
	ret = persistent_huge_pages;
	spin_unlock(&hugetlb_lock);
	return ret;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
				  struct file *file, void __user *buffer,
				  size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (hugepages_treat_as_movable)
		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
	else
		htlb_alloc_mask = GFP_HIGHUSER;
	return 0;
}

int hugetlb_overcommit_handler(struct ctl_table *table, int write,
			       struct file *file, void __user *buffer,
			       size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	spin_lock(&hugetlb_lock);
	nr_overcommit_huge_pages = sysctl_overcommit_huge_pages;
	spin_unlock(&hugetlb_lock);
	return 0;
}

#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"HugePages_Surp:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			resv_huge_pages,
			surplus_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
 * this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
};

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
	}
}


int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;

		/* If the pagetables are shared don't copy or take references */
		if (dst_pte == src_pte)
			continue;

		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	/*
	 * A page gathering list, protected by per file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	/*
	 * It is undesirable to test vma->vm_file as it should be non-null
	 * for valid hugetlb area. However, vm_file will be NULL in the error
	 * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails,
	 * do_mmap_pgoff() nullifies vma->vm_file before calling this function
	 * to clean up. Since no pte has actually been setup, it is safe to
	 * do nothing in this case.
	 */
	if (vma->vm_file) {
		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
		__unmap_hugepage_range(vma, start, end);
		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
	}
}

static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (IS_ERR(new_page)) {
		page_cache_release(old_page);
		return -PTR_ERR(new_page);
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	__SetPageUptodate(new_page);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return 0;
}

static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
		if (idx >= size)
			goto out;
		page = alloc_huge_page(vma, address);
		if (IS_ERR(page)) {
			ret = -PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, address);
		__SetPageUptodate(page);

		if (vma->vm_flags & VM_SHARED) {
			int err;
			struct inode *inode = mapping->host;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}

			spin_lock(&inode->i_lock);
			inode->i_blocks += BLOCKS_PER_HUGEPAGE;
			spin_unlock(&inode->i_lock);
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!pte_none(*ptep))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
	put_page(page);
	goto out;
}

int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = *ptep;
	if (pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		mutex_unlock(&hugetlb_instantiation_mutex);
		return ret;
	}

	ret = 0;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}

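/*
 * Example of the pfn_offset logic in follow_hugetlb_page() below: a
 * get_user_pages() walk that stays within a single huge page looks up the
 * huge pte once and then takes the same_page path repeatedly, handing out
 * page + 0, page + 1, ... up to page + HPAGE_SIZE/PAGE_SIZE - 1 without
 * doing any further page table walks.
 */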
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i,
			int write)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || pte_none(*pte) || (write && !pte_write(*pte))) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, write);
			spin_lock(&mm->page_table_lock);
			if (!(ret & VM_FAULT_ERROR))
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
		page = pte_page(*pte);
same_page:
		if (pages) {
			get_page(page);
			pages[i] = page + pfn_offset;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}

void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;
		if (!pte_none(*ptep)) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
		}
	}
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

	flush_tlb_range(vma, start, end);
}

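/*
 * The reservation map: each inode's mapping->private_list holds
 * non-overlapping file_region entries [from, to), in units of huge pages,
 * describing ranges that already carry a reservation.  For example, after
 * reserving [0, 4), a later region_chg() for [2, 6) returns 2 (only pages
 * 4 and 5 are new) and the matching region_add() merges both ranges into
 * a single [0, 6) region.
 */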
struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

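/*
 * Example of how the pieces below fit together: for a shared mapping of
 * huge pages [0, 16), hugetlb_reserve_pages(inode, 0, 16) asks
 * region_chg() how many of those pages are not yet reserved, charges the
 * quota for that amount, commits the reservation through
 * hugetlb_acct_memory() (allocating surplus pages if the free pool cannot
 * cover it), and finally records the range with region_add().  Truncating
 * the file later walks the same list via region_truncate().
 */
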
static int hugetlb_acct_memory(long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	/*
	 * When cpuset is configured, it breaks the strict hugetlb page
	 * reservation as the accounting is done on a global variable. Such
	 * reservation is completely rubbish in the presence of cpuset because
	 * the reservation is not checked against page availability for the
	 * current cpuset. Applications can still potentially be OOM'ed by the
	 * kernel for lack of free htlb pages in the cpuset that the task is in.
	 * Attempting to enforce strict accounting with cpuset is almost
	 * impossible (or too ugly) because cpuset is so fluid that
	 * tasks or memory nodes can be dynamically moved between cpusets.
	 *
	 * The change of semantics for shared hugetlb mapping with cpuset is
	 * undesirable. However, in order to preserve some of the semantics,
	 * we fall back to check against current free page availability as
	 * a best attempt and hopefully to minimize the impact of changing
	 * semantics that cpuset has.
	 */
	if (delta > 0) {
		if (gather_surplus_pages(delta) < 0)
			goto out;

		if (delta > cpuset_mems_nr(free_huge_pages_node)) {
			return_unused_surplus_pages(delta);
			goto out;
		}
	}

	ret = 0;
	if (delta < 0)
		return_unused_surplus_pages((unsigned long) -delta);

out:
	spin_unlock(&hugetlb_lock);
	return ret;
}

int hugetlb_reserve_pages(struct inode *inode, long from, long to)
{
	long ret, chg;

	chg = region_chg(&inode->i_mapping->private_list, from, to);
	if (chg < 0)
		return chg;

	if (hugetlb_get_quota(inode->i_mapping, chg))
		return -ENOSPC;
	ret = hugetlb_acct_memory(chg);
	if (ret < 0) {
		hugetlb_put_quota(inode->i_mapping, chg);
		return ret;
	}
	region_add(&inode->i_mapping->private_list, from, to);
	return 0;
}

void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	long chg = region_truncate(&inode->i_mapping->private_list, offset);

	spin_lock(&inode->i_lock);
	inode->i_blocks -= BLOCKS_PER_HUGEPAGE * freed;
	spin_unlock(&inode->i_lock);

	hugetlb_put_quota(inode->i_mapping, (chg - freed));
	hugetlb_acct_memory(-(chg - freed));
}