/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
static unsigned long surplus_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;
int hugetlb_dynamic_pool;
static int hugetlb_next_nid;

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}
static struct page *dequeue_huge_page(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid;
	struct page *page = NULL;
	struct mempolicy *mpol;
	struct zonelist *zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol);
	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		nid = zone_to_nid(*z);
		if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
		    !list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			if (vma && vma->vm_flags & VM_MAYSHARE)
				resv_huge_pages--;
			break;
		}
	}
	mpol_free(mpol);	/* unref if mpol !NULL */
	return page;
}

static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

static void free_huge_page(struct page *page)
{
	int nid = page_to_nid(page);

	BUG_ON(page_count(page));
	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages_node[nid]) {
		update_and_free_page(page);
		surplus_huge_pages--;
		surplus_huge_pages_node[nid]--;
	} else {
		enqueue_huge_page(page);
	}
	spin_unlock(&hugetlb_lock);
}
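/*
 * Note (summary of the free path above): a page freed on a node with a
 * nonzero surplus count goes straight back to the buddy allocator via
 * update_and_free_page() instead of being requeued on the hugepage
 * freelist, so a dynamically grown pool shrinks again as pages fall idle.
 */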
/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(int delta)
{
	static int prev_nid;
	int nid = prev_nid;
	int ret = 0;

	VM_BUG_ON(delta != -1 && delta != 1);
	do {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		/* To shrink on this node, there must be a surplus page */
		if (delta < 0 && !surplus_huge_pages_node[nid])
			continue;
		/* Surplus cannot exceed the total number of pages */
		if (delta > 0 && surplus_huge_pages_node[nid] >=
						nr_huge_pages_node[nid])
			continue;

		surplus_huge_pages += delta;
		surplus_huge_pages_node[nid] += delta;
		ret = 1;
		break;
	} while (nid != prev_nid);

	prev_nid = nid;
	return ret;
}

static struct page *alloc_fresh_huge_page_node(int nid)
{
	struct page *page;

	page = alloc_pages_node(nid,
		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|__GFP_NOWARN,
		HUGETLB_PAGE_ORDER);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[nid]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
	}

	return page;
}
static int alloc_fresh_huge_page(void)
{
	struct page *page;
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hugetlb_next_nid;

	do {
		page = alloc_fresh_huge_page_node(hugetlb_next_nid);
		if (page)
			ret = 1;
		/*
		 * Use a helper variable to find the next node and then
		 * copy it back to hugetlb_next_nid afterwards:
		 * otherwise there's a window in which a racer might
		 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
		 * But we don't need to use a spin_lock here: it really
		 * doesn't matter if occasionally a racer chooses the
		 * same nid as we do.  Move nid forward in the mask even
		 * if we just successfully allocated a hugepage so that
		 * the next caller gets hugepages on the next node.
		 */
		next_nid = next_node(hugetlb_next_nid, node_online_map);
		if (next_nid == MAX_NUMNODES)
			next_nid = first_node(node_online_map);
		hugetlb_next_nid = next_nid;
	} while (!page && hugetlb_next_nid != start_nid);

	return ret;
}

static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
						unsigned long address)
{
	struct page *page;

	/* Check if the dynamic pool is enabled */
	if (!hugetlb_dynamic_pool)
		return NULL;

	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[page_to_nid(page)]++;
		surplus_huge_pages++;
		surplus_huge_pages_node[page_to_nid(page)]++;
		spin_unlock(&hugetlb_lock);
	}

	return page;
}
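/*
 * Note on the two allocation paths above: alloc_fresh_huge_page() grows
 * the persistent pool, interleaving allocations across the online nodes,
 * while alloc_buddy_huge_page() (used only when hugetlb_dynamic_pool is
 * set) takes pages directly from the buddy allocator and accounts them
 * as surplus, so free_huge_page() will later return them there.
 */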
/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;

	needed = (resv_huge_pages + delta) - free_huge_pages;
	if (needed <= 0)
		return 0;

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(NULL, 0);
		if (!page) {
			/*
			 * We were not able to allocate enough pages to
			 * satisfy the entire reservation so we free what
			 * we've allocated so far.
			 */
			spin_lock(&hugetlb_lock);
			needed = 0;
			goto free;
		}

		list_add(&page->lru, &surplus_list);
	}
	allocated += needed;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (resv_huge_pages + delta) - (free_huge_pages + allocated);
	if (needed > 0)
		goto retry;

	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.
	 */
	needed += allocated;
	ret = 0;
free:
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		list_del(&page->lru);
		if ((--needed) >= 0)
			enqueue_huge_page(page);
		else
			update_and_free_page(page);
	}

	return ret;
}
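/*
 * Illustrative example for gather_surplus_pages() above: with
 * resv_huge_pages = 4, free_huge_pages = 2 and delta = 3, the first pass
 * computes needed = (4 + 3) - 2 = 5 and requests five surplus pages.
 * Because hugetlb_lock is dropped while allocating, 'needed' is
 * recomputed afterwards and any excess pages are handed back to the
 * buddy allocator.
 */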
/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they
 * were never used.
 */
void return_unused_surplus_pages(unsigned long unused_resv_pages)
{
	static int nid = -1;
	struct page *page;
	unsigned long nr_pages;

	nr_pages = min(unused_resv_pages, surplus_huge_pages);

	while (nr_pages) {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		if (!surplus_huge_pages_node[nid])
			continue;

		if (!list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			surplus_huge_pages--;
			surplus_huge_pages_node[nid]--;
			nr_pages--;
		}
	}
}
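/*
 * Note on return_unused_surplus_pages() above: a surplus page resting on
 * a freelist is counted in both the free and the surplus totals, so
 * releasing it decrements free_huge_pages, surplus_huge_pages and their
 * per-node counters together before the page returns to the buddy
 * allocator.
 */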
static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	struct page *page = NULL;
	int use_reserved_page = vma->vm_flags & VM_MAYSHARE;

	spin_lock(&hugetlb_lock);
	if (!use_reserved_page && (free_huge_pages <= resv_huge_pages))
		goto fail;

	page = dequeue_huge_page(vma, addr);
	if (!page)
		goto fail;

	spin_unlock(&hugetlb_lock);
	set_page_refcounted(page);
	return page;

fail:
	spin_unlock(&hugetlb_lock);

	/*
	 * Private mappings do not use reserved huge pages so the allocation
	 * may have failed due to an undersized hugetlb pool.  Try to grab a
	 * surplus huge page from the buddy allocator.
	 */
	if (!use_reserved_page)
		page = alloc_buddy_huge_page(vma, addr);

	return page;
}
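/*
 * Summary of alloc_huge_page() above: shared (VM_MAYSHARE) mappings may
 * dip into pages that were reserved for them; private mappings may only
 * consume free pages in excess of resv_huge_pages, and when the static
 * pool is exhausted a private fault falls back to a surplus page from
 * the buddy allocator.
 */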
static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	hugetlb_next_nid = first_node(node_online_map);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (count >= nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

#define persistent_huge_pages (nr_huge_pages - surplus_huge_pages)
static unsigned long set_max_huge_pages(unsigned long count)
{
	unsigned long min_count, ret;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 */
	spin_lock(&hugetlb_lock);
	while (surplus_huge_pages && count > persistent_huge_pages) {
		if (!adjust_pool_surplus(-1))
			break;
	}

	while (count > persistent_huge_pages) {
		int ret;
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);
		ret = alloc_fresh_huge_page();
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;
	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 */
	min_count = resv_huge_pages + nr_huge_pages - free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(min_count);
	while (min_count < persistent_huge_pages) {
		struct page *page = dequeue_huge_page(NULL, 0);
		if (!page)
			break;
		update_and_free_page(page);
	}
	while (count < persistent_huge_pages) {
		if (!adjust_pool_surplus(1))
			break;
	}
out:
	ret = persistent_huge_pages;
	spin_unlock(&hugetlb_lock);
	return ret;
}
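/*
 * The handler below makes the pool resizable at run time, e.g. via
 * "echo 20 > /proc/sys/vm/nr_hugepages" (value illustrative).  Growing
 * first converts surplus pages into persistent ones, then allocates
 * fresh huge pages; shrinking frees unreserved pages and marks the rest
 * surplus so they are released as they become free.
 */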
int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (hugepages_treat_as_movable)
		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
	else
		htlb_alloc_mask = GFP_HIGHUSER;
	return 0;
}

#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"HugePages_Surp:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			resv_huge_pages,
			surplus_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}
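/*
 * hugetlb_report_meminfo() above produces the HugePages_* block shown in
 * /proc/meminfo; with illustrative values:
 *
 *	HugePages_Total:    10
 *	HugePages_Free:      8
 *	HugePages_Rsvd:      2
 *	HugePages_Surp:      0
 *	Hugepagesize:     2048 kB
 */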
/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we
 * get this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
};

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
	}
}


int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}
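/*
 * Note on copy_hugetlb_page_range() above: 'cow' is true only for private
 * writable mappings (VM_MAYWRITE set, VM_SHARED clear).  For those, the
 * source pte is write-protected while being copied so that the first
 * write in either mm faults into hugetlb_cow() below.
 */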
void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	/*
	 * A page gathering list, protected by per file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	/*
	 * It is undesirable to test vma->vm_file as it should be non-null
	 * for a valid hugetlb area.  However, vm_file will be NULL in the
	 * error cleanup path of do_mmap_pgoff.  When the hugetlbfs ->mmap
	 * method fails, do_mmap_pgoff() nullifies vma->vm_file before
	 * calling this function to clean up.  Since no pte has actually
	 * been set up, it is safe to do nothing in this case.
	 */
	if (vma->vm_file) {
		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
		__unmap_hugepage_range(vma, start, end);
		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
	}
}

static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (!new_page) {
		page_cache_release(old_page);
		return VM_FAULT_OOM;
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return 0;
}
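/*
 * Note on hugetlb_cow() above: if page_count(old_page) == 1 the faulting
 * task is the sole user and the pte can simply be made writable in
 * place.  Otherwise a full huge page copy is done with the page table
 * lock dropped, and the pte is re-fetched and compared afterwards in
 * case a racing update invalidated the copy.
 */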
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
		if (idx >= size)
			goto out;
		if (hugetlb_get_quota(mapping))
			goto out;
		page = alloc_huge_page(vma, address);
		if (!page) {
			hugetlb_put_quota(mapping);
			ret = VM_FAULT_OOM;
			goto out;
		}
		clear_huge_page(page, address);

		if (vma->vm_flags & VM_SHARED) {
			int err;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				hugetlb_put_quota(mapping);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!pte_none(*ptep))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	hugetlb_put_quota(mapping);
	unlock_page(page);
	put_page(page);
	goto out;
}
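/*
 * Note on hugetlb_no_page() above: for shared mappings the freshly
 * cleared page is added to the page cache; -EEXIST from
 * add_to_page_cache() means another thread instantiated the page first,
 * so the fault simply retries and finds that page.
 */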
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = *ptep;
	if (pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		mutex_unlock(&hugetlb_instantiation_mutex);
		return ret;
	}

	ret = 0;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}
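/*
 * The single hugetlb_instantiation_mutex in hugetlb_fault() above
 * serializes hugepage instantiation system-wide.  That is coarse, but
 * losing the instantiation race would otherwise surface as a spurious
 * allocation failure rather than a retry against the winner's page.
 */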
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || pte_none(*pte)) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, 0);
			spin_lock(&mm->page_table_lock);
			if (!(ret & VM_FAULT_ERROR))
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
		page = pte_page(*pte);
same_page:
		if (pages) {
			get_page(page);
			pages[i] = page + pfn_offset;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}
void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;
		if (!pte_none(*ptep)) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
		}
	}
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

	flush_tlb_range(vma, start, end);
}

struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher, then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}
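/*
 * Illustrative walk-through of the reservation map: with an existing
 * region [0, 3), region_chg() (below) for [2, 5) reports a charge of 2
 * (pages 3 and 4 are new), and the following region_add() (above) merges
 * the overlap into a single region [0, 5).  region_chg() may insert a
 * zero-sized placeholder region so that the later region_add() cannot
 * fail for lack of memory.
 */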
static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}
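/*
 * Example for region_truncate() above: truncating [0, 5) at end = 2
 * trims the region to [0, 2) and returns 3, the number of previously
 * reserved pages whose accounting hugetlb_unreserve_pages() then
 * releases via hugetlb_acct_memory(freed - chg).
 */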
static int hugetlb_acct_memory(long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	/*
	 * When cpuset is configured, it breaks the strict hugetlb page
	 * reservation as the accounting is done on a global variable. Such
	 * a reservation is completely rubbish in the presence of cpuset
	 * because the reservation is not checked against page availability
	 * for the current cpuset. The application can still potentially be
	 * OOM'ed by the kernel for lack of free htlb pages in the cpuset
	 * that the task is in.
	 * Attempting to enforce strict accounting with cpuset is almost
	 * impossible (or too ugly) because cpuset is so fluid that
	 * a task or memory node can be dynamically moved between cpusets.
	 *
	 * The change of semantics for shared hugetlb mappings with cpuset is
	 * undesirable. However, in order to preserve some of the semantics,
	 * we fall back to checking against current free page availability as
	 * a best attempt and hopefully to minimize the impact of changing
	 * semantics that cpuset has.
	 */
	if (delta > 0) {
		if (gather_surplus_pages(delta) < 0)
			goto out;

		if (delta > cpuset_mems_nr(free_huge_pages_node))
			goto out;
	}

	ret = 0;
	resv_huge_pages += delta;
	if (delta < 0)
		return_unused_surplus_pages((unsigned long) -delta);

out:
	spin_unlock(&hugetlb_lock);
	return ret;
}

int hugetlb_reserve_pages(struct inode *inode, long from, long to)
{
	long ret, chg;

	chg = region_chg(&inode->i_mapping->private_list, from, to);
	if (chg < 0)
		return chg;

	ret = hugetlb_acct_memory(chg);
	if (ret < 0)
		return ret;
	region_add(&inode->i_mapping->private_list, from, to);
	return 0;
}

void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	long chg = region_truncate(&inode->i_mapping->private_list, offset);
	hugetlb_acct_memory(freed - chg);
}